Dataset schema (column name, type, and value/length range as reported by the dataset viewer):

    repo_name           string    length 7 to 71
    file_path           string    length 5 to 118
    context             list
    import_statement    string    length 45 to 12.5k
    token_num           int64     641 to 99.4k
    cropped_code        string    length 44 to 17k
    all_code            string    length 43 to 754k
    next_line           string    length 2 to 330
    gold_snippet_index  int64     0 to 68
    created_at          string    length 25 to 25
    level               string    9 classes
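The columns suggest a repository-level code-completion dataset: each row pairs a file's code (`all_code`, `cropped_code`) and its imports (`import_statement`) with retrieved cross-file context snippets (`context`), the ground-truth `next_line`, and the index of the gold context snippet (`gold_snippet_index`). Below is a minimal sketch of loading and inspecting one such row with the Hugging Face `datasets` library; the dataset ID used here is a hypothetical placeholder, not the actual Hub ID.

```python
# Minimal sketch: load a dataset with the schema above and inspect one row.
# "your-org/repo-level-code-completion" is a hypothetical dataset ID.
from datasets import load_dataset

ds = load_dataset("your-org/repo-level-code-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["token_num"], row["level"])
print("context snippets:", len(row["context"]))
print("gold snippet index:", row["gold_snippet_index"])
print("ground-truth next line:", row["next_line"])
```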
repo_name: camenduru/FreeInit-hf
file_path: app.py
context:
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
import_statement:
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
token_num: 14,844
cropped_code:
self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if 
self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention()
all_code:
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if 
self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention()
next_line: pipeline = AnimationFreeInitPipeline(
gold_snippet_index: 1
created_at: 2023-12-19 21:06:32+00:00
level: 24k
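One plausible way to use a row like the one above is to assemble the retrieved context snippets, the import statement, and the cropped code into a prompt, then compare the model's first generated line against `next_line`. The sketch below assumes the row is a Python dict with the fields shown; `generate_next_line` is a hypothetical stand-in for an actual model call, and the prompt format is an illustrative choice rather than the dataset's prescribed one.

```python
# Sketch: turn one dataset row into a completion prompt and score it by exact match.
# `generate_next_line` is a hypothetical placeholder for a real model call.
def build_prompt(row: dict) -> str:
    # Concatenate the retrieved cross-file snippets, then the in-file prefix.
    context_blocks = [
        f"# Path: {c['path']}\n{c['snippet']}" for c in row["context"]
    ]
    return "\n\n".join(context_blocks + [row["import_statement"], row["cropped_code"]])

def exact_match(row: dict, generate_next_line) -> bool:
    # Compare only the first predicted line to the ground-truth next line.
    prediction = generate_next_line(build_prompt(row))
    predicted_lines = prediction.strip().splitlines()
    return bool(predicted_lines) and predicted_lines[0].strip() == row["next_line"].strip()
```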
repo_name: m-abr/FCPCodebase
file_path: world/World.py
context:
[ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:bool=True, step:int=None) -> None:\n '''\n Write `msg` to file named `self.topic`\n\n Parameters\n ----------\n msg : str\n message to be written\n step : int\n simulation step is written before the message to provide additional information\n default is `None` (nothing is written before the message)\n '''\n if not self.enabled: return\n\n # The log folder is only created if needed\n if Logger._folder is None: \n rnd = ''.join(random.choices(ascii_uppercase, k=6)) # Useful if multiple processes are running in parallel \n Logger._folder = \"./logs/\" + datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S__\") + rnd + \"/\"\n print(\"\\nLogger Info: see\",Logger._folder)\n Path(Logger._folder).mkdir(parents=True, exist_ok=True)\n\n self.no_of_entries += 1\n\n with open(Logger._folder + self.topic + \".log\", 'a+') as f:\n prefix = \"\"\n write_step = step is not None\n if timestamp or write_step:\n prefix = \"{\"\n if timestamp: \n prefix += datetime.now().strftime(\"%a %H:%M:%S\")\n if write_step: prefix += \" \"\n if write_step:\n prefix += f'Step:{step}'\n prefix += \"} \"\n f.write(prefix + msg + \"\\n\")" }, { "identifier": "Matrix_4x4", "path": "math_ops/Matrix_4x4.py", "snippet": "class Matrix_4x4():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_4x4( ) # create identity matrix\n b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix\n c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix\n d = Matrix_4x4( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(4)\n elif type(matrix) == Matrix_4x4: \n self.m = np.copy(matrix.m)\n elif type(matrix) == Matrix_3x3: \n self.m = np.identity(4)\n self.m[0:3,0:3] = matrix.m\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (4,4) #reshape if needed, throw error if impossible\n\n\n @classmethod\n def from_translation(cls, translation_vec):\n '''\n Create transformation matrix from translation_vec translation\n e.g. Matrix_4x4.from_translation((a,b,c))\n output: [[1,0,0,a],[0,1,0,b],[0,0,1,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n @classmethod\n def from_3x3_and_translation(cls, mat3x3:Matrix_3x3, translation_vec):\n '''\n Create transformation matrix from rotation matrix (3x3) and translation\n e.g. 
Matrix_4x4.from_3x3_and_translation(r,(a,b,c)) \n output: [[r00,r01,r02,a],[r10,r11,r12,b],[r20,r21,r22,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,0:3] = mat3x3.m\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n def translate(self, translation_vec, in_place=False):\n '''\n Translates the current transformation matrix\n\n Parameters\n ----------\n translation_vec : array_like, length 3\n translation vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place\n * False: a new matrix is returned and the current one is not changed \n\n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n vec = np.array([*translation_vec,1])# conversion to 4D vector\n np.matmul(self.m, vec, out=vec) # compute only 4th column\n\n if in_place:\n self.m[:,3] = vec\n return self\n else:\n ret = Matrix_4x4(self.m)\n ret.m[:,3] = vec\n return ret\n\n\n def get_translation(self):\n ''' Get translation vector (x,y,z) '''\n return self.m[0:3,3] # return view\n\n def get_x(self):\n return self.m[0,3]\n\n def get_y(self):\n return self.m[1,3]\n\n def get_z(self):\n return self.m[2,3]\n\n def get_rotation_4x4(self):\n ''' Get Matrix_4x4 without translation ''' \n mat = Matrix_4x4(self)\n mat.m[0:3,3] = 0\n return mat\n\n def get_rotation(self):\n ''' Get rotation Matrix_3x3 '''\n return Matrix_3x3(self.m[0:3,0:3])\n\n def get_distance(self):\n ''' Get translation vector length '''\n return np.linalg.norm(self.m[0:3,3])\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n \n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(np.clip(self.m[2,2],-1,1)) * 180 / pi)\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n\n # shortcuts for rotation around 1 axis\n if 
rotation_vec[0]==0:\n if rotation_vec[1]==0:\n if rotation_vec[2]==1:\n return self.rotate_z_rad(rotation_rad, in_place)\n elif rotation_vec[2]==-1:\n return self.rotate_z_rad(-rotation_rad, in_place)\n elif rotation_vec[2]==0:\n if rotation_vec[1]==1:\n return self.rotate_y_rad(rotation_rad, in_place)\n elif rotation_vec[1]==-1:\n return self.rotate_y_rad(-rotation_rad, in_place)\n elif rotation_vec[1]==0 and rotation_vec[2]==0:\n if rotation_vec[0]==1:\n return self.rotate_x_rad(rotation_rad, in_place)\n elif rotation_vec[0]==-1:\n return self.rotate_x_rad(-rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys, 0],\n [xyc1 + zs, yyc1 + c, yzc1 - xs, 0],\n [xzc1 - ys, yzc1 + xs, zzc1 + c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0, 0],\n [ s, c, 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix 
is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current transformation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_4x4(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False):\n '''\n Multiplies the current transformation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed (if mat is a 4x4 matrix)\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n if type(mat) == Matrix_4x4: \n mat = mat.m\n else:\n mat = np.asarray(mat) # conversion to array, if needed\n if mat.ndim == 1: # multiplication by 3D vector\n vec = np.append(mat,1) # conversion to 4D vector\n return np.matmul(self.m, vec)[0:3] # conversion to 3D vector\n\n if in_place:\n np.matmul(self.m, mat, self.m)\n return self\n else:\n return Matrix_4x4(np.matmul(self.m, mat))\n\n def __call__(self,mat, is_spherical=False):\n '''\n Multiplies the current transformation matrix by mat and returns a new matrix or vector\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n is_spherical : bool\n only relevant if mat is a 3D vector, True if it uses spherical coordinates\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix; \n a 3D vector is returned if mat is a vector\n '''\n\n if is_spherical and mat.ndim == 1: mat = M.deg_sph2cart(mat)\n return self.multiply(mat,False)" }, { "identifier": "Draw", "path": "world/commons/Draw.py", "snippet": "class Draw():\n _socket = None\n\n def __init__(self, is_enabled:bool, unum:int, host:str, port:int) -> None:\n self.enabled = is_enabled \n self._is_team_right = None\n self._unum = unum \n self._prefix = 
f'?{unum}_'.encode() # temporary prefix that should never be used in normal circumstances\n \n #Create one socket for all instances\n if Draw._socket is None:\n Draw._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )\n Draw._socket.connect((host, port))\n Draw.clear_all()\n\n\n def set_team_side(self, is_right):\n ''' Called by world parser to switch side '''\n '''\n Generate an appropriate player ID\n RoboViz has a bug/feature: we send \"swap buffers for player: 'l_1' and RoboViz\n will swap every buffer that contains 'l_1' in the name, including \n 'l_10' and 'l_11'. To avoid that, we swap the separator to 'l-10', 'l-11'\n '''\n self._is_team_right = is_right\n self._prefix = f\"{'r' if is_right else 'l'}{'_' if self._unum < 10 else '-'}{self._unum}_\".encode() #e.g. b'l_5', b'l-10'\n\n\n @staticmethod\n def _send(msg, id, flush):\n ''' Private method to send message if RoboViz is accessible '''\n try:\n if flush:\n Draw._socket.send(msg + id + b'\\x00\\x00\\x00' + id + b'\\x00')\n else:\n Draw._socket.send(msg + id + b'\\x00')\n except ConnectionRefusedError:\n pass\n\n \n def circle(self, pos2d, radius, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw circle\n\n Examples\n ----------\n Circle in 2D (z=0): circle((-1,2), 3, 2, Draw.Color.red, \"my_circle\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos2d).any(), \"Argument 'pos2d' contains 'nan' values\"\n\n if self._is_team_right:\n pos2d = (-pos2d[0],-pos2d[1]) \n\n msg = b'\\x01\\x00' + (\n f'{f\"{pos2d[0] :.4f}\":.6s}'\n f'{f\"{pos2d[1] :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def line(self, p1, p2, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw line\n\n Examples\n ----------\n Line in 3D: line((0,0,0), (0,0,2), 3, Draw.Color.red, \"my_line\") \n Line in 2D (z=0): line((0,0), (0,1), 3, Draw.Color.red, \"my_line\") \n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(p1).any(), \"Argument 'p1' contains 'nan' values\"\n assert not np.isnan(p2).any(), \"Argument 'p2' contains 'nan' values\"\n\n z1 = p1[2] if len(p1)==3 else 0\n z2 = p2[2] if len(p2)==3 else 0\n\n if self._is_team_right: \n p1 = (-p1[0],-p1[1],p1[2]) if len(p1)==3 else (-p1[0],-p1[1])\n p2 = (-p2[0],-p2[1],p2[2]) if len(p2)==3 else (-p2[0],-p2[1])\n\n msg = b'\\x01\\x01' + (\n f'{f\"{p1[0] :.4f}\":.6s}'\n f'{f\"{p1[1] :.4f}\":.6s}'\n f'{f\"{z1 :.4f}\":.6s}'\n f'{f\"{p2[0] :.4f}\":.6s}'\n f'{f\"{p2[1] :.4f}\":.6s}'\n f'{f\"{z2 :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n\n Draw._send(msg, self._prefix + id.encode(), flush)\n \n\n def point(self, pos, size, color:bytes, id:str, flush=True):\n ''' \n Draw point\n\n Examples\n ----------\n Point in 3D: point((1,1,1), 3, Draw.Color.red, \"my_point\")\n Point in 2D (z=0): point((1,1), 3, Draw.Color.red, \"my_point\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x02' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{size :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def sphere(self, pos, radius, color:bytes, id:str, flush=True):\n ''' \n Draw sphere\n\n Examples\n ----------\n Sphere in 3D: sphere((1,1,1), 3, Draw.Color.red, \"my_sphere\")\n Sphere in 2D (z=0): sphere((1,1), 3, Draw.Color.red, \"my_sphere\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x03' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def polygon(self, vertices, color:bytes, alpha:int, id:str, flush=True):\n ''' \n Draw polygon\n\n Examples\n ----------\n Polygon in 3D: polygon(((0,0,0),(1,0,0),(0,1,0)), Draw.Color.red, 255, \"my_polygon\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert 0<=alpha<=255, \"The alpha channel (degree of opacity) must be in range [0,255]\"\n\n if self._is_team_right: \n vertices = [(-v[0],-v[1],v[2]) for v in vertices]\n\n msg = b'\\x01\\x04' + bytes([len(vertices)]) + color + alpha.to_bytes(1,'big')\n\n for v in vertices:\n msg += (\n f'{f\"{v[0] :.4f}\":.6s}'\n f'{f\"{v[1] :.4f}\":.6s}'\n f'{f\"{v[2] :.4f}\":.6s}').encode()\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def annotation(self, pos, text, color:bytes, id:str, flush=True):\n ''' \n Draw annotation\n\n Examples\n ----------\n Annotation in 3D: annotation((1,1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n Annotation in 2D (z=0): annotation((1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n '''\n if not self.enabled: return\n if type(text) != bytes: text = str(text).encode()\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x02\\x00' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}').encode() + color + text + b'\\x00'\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n \n def arrow(self, p1, p2, arrowhead_size, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw arrow\n\n Examples\n ----------\n Arrow in 3D: arrow((0,0,0), (0,0,2), 0.1, 3, Draw.Color.red, \"my_arrow\")\n Arrow in 2D (z=0): arrow((0,0), (0,1), 0.1, 3, Draw.Color.red, \"my_arrow\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n\n # No need to invert sides, the called shapes will handle that\n if len(p1)==2: p1 = M.to_3d(p1) \n else: p1 = np.asarray(p1)\n if len(p2)==2: p2 = M.to_3d(p2) \n else: p2 = np.asarray(p2)\n\n vec = p2-p1\n vec_size = np.linalg.norm(vec)\n if vec_size == 0: return #return without warning/error\n if arrowhead_size > vec_size: arrowhead_size = vec_size\n\n ground_proj_perpendicular = np.array([ vec[1], -vec[0], 0 ])\n\n if np.all(ground_proj_perpendicular == 0): #vertical arrow\n ground_proj_perpendicular = np.array([ arrowhead_size/2, 0, 0 ])\n else:\n ground_proj_perpendicular *= arrowhead_size/2 / np.linalg.norm(ground_proj_perpendicular)\n\n head_start = p2 - vec * (arrowhead_size/vec_size)\n head_pt1 = head_start + ground_proj_perpendicular\n head_pt2 = head_start - ground_proj_perpendicular\n\n self.line(p1,p2,thickness,color,id,False)\n self.line(p2,head_pt1,thickness,color,id,False)\n self.line(p2,head_pt2,thickness,color,id,flush)\n\n\n def flush(self, id):\n ''' Flush specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), False)\n\n def clear(self, id):\n ''' Clear specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), True) #swap buffer twice\n\n\n def clear_player(self):\n ''' Clear all drawings made by this player '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix, True) #swap buffer twice\n\n\n @staticmethod\n def clear_all():\n ''' Clear all drawings of all players '''\n if Draw._socket is not None:\n Draw._send(b'\\x00\\x00\\x00\\x00\\x00',b'',False) #swap buffer twice using no id\n\n\n class Color():\n '''\n Based on X11 colors\n The names are restructured to make better suggestions\n '''\n pink_violet = b'\\xC7\\x15\\x85'\n pink_hot = b'\\xFF\\x14\\x93'\n pink_violet_pale = b'\\xDB\\x70\\x93'\n pink = b'\\xFF\\x69\\xB4'\n pink_pale = b'\\xFF\\xB6\\xC1'\n \n red_dark = b'\\x8B\\x00\\x00'\n red = b'\\xFF\\x00\\x00'\n red_brick = b'\\xB2\\x22\\x22'\n red_crimson = b'\\xDC\\x14\\x3C'\n red_indian = b'\\xCD\\x5C\\x5C'\n red_salmon = b'\\xFA\\x80\\x72'\n\n orange_red = b'\\xFF\\x45\\x00'\n orange = b'\\xFF\\x8C\\x00'\n orange_ligth = b'\\xFF\\xA5\\x00'\n\n yellow_gold = b'\\xFF\\xD7\\x00'\n yellow = b'\\xFF\\xFF\\x00'\n yellow_light = b'\\xBD\\xB7\\x6B'\n\n brown_maroon =b'\\x80\\x00\\x00'\n brown_dark = b'\\x8B\\x45\\x13'\n brown = b'\\xA0\\x52\\x2D'\n brown_gold = b'\\xB8\\x86\\x0B'\n brown_light = b'\\xCD\\x85\\x3F'\n brown_pale = b'\\xDE\\xB8\\x87'\n\n green_dark = b'\\x00\\x64\\x00' \n green = b'\\x00\\x80\\x00' \n green_lime = b'\\x32\\xCD\\x32' \n green_light = b'\\x00\\xFF\\x00' \n green_lawn = b'\\x7C\\xFC\\x00' \n green_pale = b'\\x90\\xEE\\x90' \n\n cyan_dark = b'\\x00\\x80\\x80' \n cyan_medium = b'\\x00\\xCE\\xD1' \n cyan = b'\\x00\\xFF\\xFF' \n cyan_light = b'\\xAF\\xEE\\xEE'\n\n blue_dark = b'\\x00\\x00\\x8B' \n blue = b'\\x00\\x00\\xFF' \n blue_royal = b'\\x41\\x69\\xE1' \n blue_medium = b'\\x1E\\x90\\xFF' \n blue_light = b'\\x00\\xBF\\xFF'\n blue_pale = b'\\x87\\xCE\\xEB'\n\n purple_violet = b'\\x94\\x00\\xD3' \n purple_magenta = b'\\xFF\\x00\\xFF' \n purple_light = b'\\xBA\\x55\\xD3' \n purple_pale = b'\\xDD\\xA0\\xDD'\n\n white = b'\\xFF\\xFF\\xFF'\n gray_10 = b'\\xE6\\xE6\\xE6'\n gray_20 = b'\\xCC\\xCC\\xCC'\n gray_30 = b'\\xB2\\xB2\\xB2' \n gray_40 = b'\\x99\\x99\\x99'\n gray_50 = b'\\x80\\x80\\x80'\n gray_60 = b'\\x66\\x66\\x66'\n gray_70 = b'\\x4C\\x4C\\x4C'\n gray_80 = 
b'\\x33\\x33\\x33'\n gray_90 = b'\\x1A\\x1A\\x1A'\n black = b'\\x00\\x00\\x00' \n\n @staticmethod\n def get(r,g,b):\n ''' Get RGB color (0-255) '''\n return bytes([int(r),int(g),int(b)])" }, { "identifier": "Other_Robot", "path": "world/commons/Other_Robot.py", "snippet": "class Other_Robot():\n def __init__(self, unum, is_teammate) -> None:\n self.unum = unum # convenient variable to indicate uniform number (same as other robot's index + 1)\n self.is_self = False # convenient flag to indicate if this robot is self\n self.is_teammate = is_teammate # convenient variable to indicate if this robot is from our team\n self.is_visible = False # True if this robot was seen in the last message from the server (it doesn't mean we know its absolute location)\n self.body_parts_cart_rel_pos = dict() # cartesian relative position of the robot's visible body parts\n self.body_parts_sph_rel_pos = dict() # spherical relative position of the robot's visible body parts\n self.vel_filter = 0.3 # EMA filter coefficient applied to self.state_filtered_velocity\n self.vel_decay = 0.95 # velocity decay at every vision cycle (neutralized if velocity is updated)\n\n\n # State variables: these are computed when this robot is visible and when the original robot is able to self-locate\n self.state_fallen = False # true if the robot is lying down (updated when head is visible)\n self.state_last_update = 0 # World.time_local_ms when the state was last updated\n self.state_horizontal_dist = 0 # horizontal head distance if head is visible, otherwise, average horizontal distance of visible body parts (the distance is updated by vision or radio when state_abs_pos gets a new value, but also when the other player is not visible, by assuming its last position)\n self.state_abs_pos = None # 3D head position if head is visible, otherwise, 2D average position of visible body parts, or, 2D radio head position\n self.state_orientation = 0 # orientation based on pair of lower arms or feet, or average of both (WARNING: may be older than state_last_update) \n self.state_ground_area = None # (pt_2d,radius) projection of player area on ground (circle), not precise if farther than 3m (for performance), useful for obstacle avoidance when it falls\n self.state_body_parts_abs_pos = dict() # 3D absolute position of each body part\n self.state_filtered_velocity = np.zeros(3) # 3D filtered velocity (m/s) (if the head is not visible, the 2D part is updated and v.z decays)" }, { "identifier": "Robot", "path": "world/Robot.py", "snippet": "class Robot():\n STEPTIME = 0.02 # Fixed step time\n VISUALSTEP = 0.04 # Fixed visual step time\n SQ_STEPTIME = STEPTIME * STEPTIME\n GRAVITY = np.array([0,0,-9.81])\n IMU_DECAY = 0.996 #IMU's velocity decay\n \n #------------------ constants to force symmetry in joints/effectors\n\n MAP_PERCEPTOR_TO_INDEX = {\"hj1\":0, \"hj2\":1, \"llj1\":2, \"rlj1\":3,\n \"llj2\":4, \"rlj2\":5, \"llj3\":6, \"rlj3\":7,\n \"llj4\":8, \"rlj4\":9, \"llj5\":10,\"rlj5\":11,\n \"llj6\":12,\"rlj6\":13,\"laj1\":14,\"raj1\":15,\n \"laj2\":16,\"raj2\":17,\"laj3\":18,\"raj3\":19,\n \"laj4\":20,\"raj4\":21,\"llj7\":22,\"rlj7\":23 }\n\n # Fix symmetry issues 1a/4 (identification) \n FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'}\n FIX_INDICES_LIST = [5,13,17,18,20]\n\n # Recommended height for unofficial beam (near ground)\n BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4]\n\n\n def __init__(self, unum:int, robot_type:int) -> None:\n robot_xml = \"nao\"+str(robot_type)+\".xml\" # Typical NAO file name\n self.type = robot_type\n 
self.beam_height = Robot.BEAM_HEIGHTS[robot_type]\n self.no_of_joints = 24 if robot_type == 4 else 22 \n\n #Fix symmetry issues 1b/4 (identification) \n self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints)\n self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1\n\n self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects'\n self.unum = unum # Robot's uniform number\n self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s)\n self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)\n self.frp = dict() # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {\"lf\":(px,py,pz,fx,fy,fz)}\n self.feet_toes_last_touch = {\"lf\":0,\"rf\":0,\"lf1\":0,\"rf1\":0} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" World.time_local_ms when foot/toe last touched any surface\n self.feet_toes_are_touching = {\"lf\":False,\"rf\":False,\"lf1\":False,\"rf1\":False} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" True if touching in last received server message\n self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies\n self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m)\n\n # Joint variables are optimized for performance / array operations\n self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg)\n self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s)\n self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info)\n self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix\n\n # Localization variables relative to head\n self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field\n self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head\n self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field\n self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head\n self.loc_head_position = np.zeros(3) # Absolute head position (m)\n self.loc_head_position_history = deque(maxlen=40)# Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)\n self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy)\n self.loc_head_orientation = 0 # Head orientation (deg)\n self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_last_update = 0 # World.time_local_ms when the localization was last updated\n self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio\n self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc)\n self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)\n\n # Localization variables relative to torso\n 
self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field \n self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field\n self.loc_torso_roll = 0 # Torso roll (deg)\n self.loc_torso_pitch = 0 # Torso pitch (deg) \n self.loc_torso_orientation = 0 # Torso orientation (deg)\n self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)\n self.loc_torso_position = np.zeros(3) # Absolute torso position (m)\n self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s)\n self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2)\n\n # Other localization variables\n self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m)\n self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg)\n self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)\n self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)\n\n # Localization special variables\n '''\n self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.\n There are situations in which the rotation and translation cannot be computed, \n but the z-coordinate can still be found through vision, in which case:\n self.loc_is_up_to_date is False\n self.loc_head_z_is_up_to_date is True\n It should be used in applications which rely on z as an independent coordinate, such\n as detecting if the robot has fallen, or as an observation for machine learning.\n It should NEVER be used for 3D transformations.\n '''\n self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m)\n self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed\n self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s)\n\n # Localization + Gyroscope\n # These variables are reliable. 
The gyroscope is used to update the rotation when waiting for the next visual cycle\n self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro)\n self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro)\n self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro)\n self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro)\n self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)\n self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information \n\n # Localization + Gyroscope + Accelerometer\n # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to \n # wrong acceleration -> wrong velocity -> wrong position\n self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)\n self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc)\n self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc)\n self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)\n\n\n #Using explicit variables to enable IDE suggestions\n self.J_HEAD_YAW = 0\n self.J_HEAD_PITCH = 1\n self.J_LLEG_YAW_PITCH = 2\n self.J_RLEG_YAW_PITCH = 3\n self.J_LLEG_ROLL = 4\n self.J_RLEG_ROLL = 5\n self.J_LLEG_PITCH = 6\n self.J_RLEG_PITCH = 7\n self.J_LKNEE = 8\n self.J_RKNEE = 9\n self.J_LFOOT_PITCH = 10\n self.J_RFOOT_PITCH = 11\n self.J_LFOOT_ROLL = 12\n self.J_RFOOT_ROLL = 13\n self.J_LARM_PITCH = 14\n self.J_RARM_PITCH = 15\n self.J_LARM_ROLL = 16\n self.J_RARM_ROLL = 17\n self.J_LELBOW_YAW = 18\n self.J_RELBOW_YAW = 19\n self.J_LELBOW_ROLL = 20\n self.J_RELBOW_ROLL = 21\n self.J_LTOE_PITCH = 22\n self.J_RTOE_PITCH = 23\n\n\n #------------------ parse robot xml\n\n dir = M.get_active_directory(\"/world/commons/robots/\")\n robot_xml_root = xmlp.parse(dir + robot_xml).getroot()\n\n joint_no = 0\n for child in robot_xml_root:\n if child.tag == \"bodypart\":\n self.body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])\n elif child.tag == \"joint\":\n self.joints_info[joint_no] = Joint_Info(child)\n self.joints_position[joint_no] = 0.0\n ji = self.joints_info[joint_no]\n\n #save joint if body part is 1st anchor (to simplify model traversal in a single direction)\n self.body_parts[ji.anchor0_part].joints.append(Robot.MAP_PERCEPTOR_TO_INDEX[ji.perceptor]) \n\n joint_no += 1\n if joint_no == self.no_of_joints: break #ignore extra joints\n\n else:\n raise NotImplementedError\n\n assert joint_no == self.no_of_joints, \"The Robot XML and the robot type don't 
match!\"\n\n\n def get_head_abs_vel(self, history_steps:int):\n '''\n Get robot's head absolute velocity (m/s)\n\n Parameters\n ----------\n history_steps : int\n number of history steps to consider [1,40]\n\n Examples\n --------\n get_head_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04\n get_head_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08\n get_head_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12\n '''\n assert 1 <= history_steps <= 40, \"Argument 'history_steps' must be in range [1,40]\"\n\n if len(self.loc_head_position_history) == 0:\n return np.zeros(3)\n\n h_step = min(history_steps, len(self.loc_head_position_history))\n t = h_step * Robot.VISUALSTEP\n\n return (self.loc_head_position - self.loc_head_position_history[h_step-1]) / t\n \n\n def _initialize_kinematics(self):\n\n #starting with head\n parts={\"head\"}\n sequential_body_parts = [\"head\"]\n\n while len(parts) > 0:\n part = parts.pop()\n\n for j in self.body_parts[part].joints:\n\n p = self.joints_info[j].anchor1_part\n\n if len(self.body_parts[p].joints) > 0: #add body part if it is the 1st anchor of some joint\n parts.add(p)\n sequential_body_parts.append(p)\n\n self.fwd_kinematics_list = [(self.body_parts[part],j, self.body_parts[self.joints_info[j].anchor1_part] ) \n for part in sequential_body_parts for j in self.body_parts[part].joints]\n\n #Fix symmetry issues 4/4 (kinematics)\n for i in Robot.FIX_INDICES_LIST:\n self.joints_info[i].axes *= -1\n aux = self.joints_info[i].min\n self.joints_info[i].min = -self.joints_info[i].max\n self.joints_info[i].max = -aux\n\n\n def update_localization(self, localization_raw, time_local_ms): \n\n # parse raw data\n loc = localization_raw.astype(float) #32bits to 64bits for consistency\n self.loc_is_up_to_date = bool(loc[32])\n self.loc_head_z_is_up_to_date = bool(loc[34])\n\n if self.loc_head_z_is_up_to_date:\n time_diff = (time_local_ms - self.loc_head_z_last_update) / 1000 \n self.loc_head_z_vel = (loc[33] - self.loc_head_z) / time_diff\n self.loc_head_z = loc[33]\n self.loc_head_z_last_update = time_local_ms\n\n # Save last position to history at every vision cycle (even if not up to date) (update_localization is only called at vision cycles)\n self.loc_head_position_history.appendleft(np.copy(self.loc_head_position))\n\n if self.loc_is_up_to_date:\n time_diff = (time_local_ms - self.loc_last_update) / 1000\n self.loc_last_update = time_local_ms\n self.loc_head_to_field_transform.m[:] = loc[0:16].reshape((4,4))\n self.loc_field_to_head_transform.m[:] = loc[16:32].reshape((4,4))\n \n # extract data (related to the robot's head)\n self.loc_rotation_head_to_field = self.loc_head_to_field_transform.get_rotation()\n self.loc_rotation_field_to_head = self.loc_field_to_head_transform.get_rotation()\n p = self.loc_head_to_field_transform.get_translation()\n self.loc_head_velocity = (p - self.loc_head_position) / time_diff\n self.loc_head_position = p\n self.loc_head_position_last_update = time_local_ms\n self.loc_head_orientation = self.loc_head_to_field_transform.get_yaw_deg()\n self.radio_fallen_state = False\n\n # extract data (related to the center of mass)\n p = self.loc_head_to_field_transform(self.rel_cart_CoM_position)\n self.loc_CoM_velocity = (p - self.loc_CoM_position) / time_diff\n self.loc_CoM_position = p\n\n # extract data (related to the robot's torso)\n t = self.get_body_part_to_field_transform('torso')\n self.loc_torso_to_field_transform = t\n self.loc_torso_to_field_rotation = 
t.get_rotation()\n self.loc_torso_orientation = t.get_yaw_deg()\n self.loc_torso_pitch = t.get_pitch_deg()\n self.loc_torso_roll = t.get_roll_deg()\n self.loc_torso_inclination = t.get_inclination_deg()\n p = t.get_translation()\n self.loc_torso_velocity = (p - self.loc_torso_position) / time_diff\n self.loc_torso_position = p\n self.loc_torso_acceleration = self.loc_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n\n\n def head_to_body_part_transform(self, body_part_name, coords, is_batch=False):\n '''\n If coord is a vector or list of vectors:\n Convert cartesian coordinates that are relative to head to coordinates that are relative to a body part \n\n If coord is a Matrix_4x4 or a list of Matrix_4x4:\n Convert pose that is relative to head to a pose that is relative to a body part \n \n Parameters\n ----------\n body_part_name : `str`\n name of body part (given by the robot's XML)\n coords : array_like\n One 3D position or list of 3D positions\n is_batch : `bool`\n Indicates if coords is a batch of 3D positions\n\n Returns\n -------\n coord : `list` or ndarray\n A numpy array is returned if is_batch is False, otherwise, a list of arrays is returned\n '''\n head_to_bp_transform : Matrix_4x4 = self.body_parts[body_part_name].transform.invert()\n \n if is_batch:\n return [head_to_bp_transform(c) for c in coords]\n else:\n return head_to_bp_transform(coords)\n\n\n\n def get_body_part_to_field_transform(self, body_part_name) -> Matrix_4x4:\n '''\n Computes the transformation matrix from body part to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.body_parts[body_part_name].transform)\n\n def get_body_part_abs_position(self, body_part_name) -> np.ndarray:\n '''\n Computes the absolute position of a body part considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_body_part_to_field_transform(body_part_name).get_translation()\n\n def get_joint_to_field_transform(self, joint_index) -> Matrix_4x4:\n '''\n Computes the transformation matrix from joint to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.joints_transform[joint_index])\n\n def get_joint_abs_position(self, joint_index) -> np.ndarray:\n '''\n Computes the absolute position of a joint considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. 
Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_joint_to_field_transform(joint_index).get_translation()\n\n def update_pose(self):\n\n if self.fwd_kinematics_list is None:\n self._initialize_kinematics()\n\n for body_part, j, child_body_part in self.fwd_kinematics_list:\n ji = self.joints_info[j]\n self.joints_transform[j].m[:] = body_part.transform.m\n self.joints_transform[j].translate(ji.anchor0_axes, True)\n child_body_part.transform.m[:] = self.joints_transform[j].m\n child_body_part.transform.rotate_deg(ji.axes, self.joints_position[j], True)\n child_body_part.transform.translate(ji.anchor1_axes_neg, True)\n\n self.rel_cart_CoM_position = np.average([b.transform.get_translation() for b in self.body_parts.values()], 0,\n [b.mass for b in self.body_parts.values()])\n\n\n def update_imu(self, time_local_ms):\n\n # update IMU\n if self.loc_is_up_to_date:\n self.imu_torso_roll = self.loc_torso_roll\n self.imu_torso_pitch = self.loc_torso_pitch \n self.imu_torso_orientation = self.loc_torso_orientation\n self.imu_torso_inclination = self.loc_torso_inclination\n self.imu_torso_to_field_rotation.m[:] = self.loc_torso_to_field_rotation.m\n self.imu_weak_torso_to_field_transform.m[:] = self.loc_torso_to_field_transform.m\n self.imu_weak_head_to_field_transform.m[:] = self.loc_head_to_field_transform.m\n self.imu_weak_field_to_head_transform.m[:] = self.loc_field_to_head_transform.m\n self.imu_weak_torso_position[:] = self.loc_torso_position\n self.imu_weak_torso_velocity[:] = self.loc_torso_velocity\n self.imu_weak_torso_acceleration[:] = self.loc_torso_acceleration\n self.imu_weak_torso_next_position = self.loc_torso_position + self.loc_torso_velocity * Robot.STEPTIME + self.loc_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.loc_torso_velocity + self.loc_torso_acceleration * Robot.STEPTIME\n self.imu_weak_CoM_position[:] = self.loc_CoM_position\n self.imu_weak_CoM_velocity[:] = self.loc_CoM_velocity\n self.imu_last_visual_update = time_local_ms\n else:\n g = self.gyro / 50 # convert degrees per second to degrees per step\n\n self.imu_torso_to_field_rotation.multiply( Matrix_3x3.from_rotation_deg(g), in_place=True, reverse_order=True)\n\n self.imu_torso_orientation = self.imu_torso_to_field_rotation.get_yaw_deg()\n self.imu_torso_pitch = self.imu_torso_to_field_rotation.get_pitch_deg()\n self.imu_torso_roll = self.imu_torso_to_field_rotation.get_roll_deg()\n\n self.imu_torso_inclination = atan(sqrt(tan(self.imu_torso_roll/180*pi)**2+tan(self.imu_torso_pitch/180*pi)**2))*180/pi\n\n # Update position and velocity until 0.2 seconds has passed since last visual update\n if time_local_ms < self.imu_last_visual_update + 200:\n self.imu_weak_torso_position[:] = self.imu_weak_torso_next_position\n if self.imu_weak_torso_position[2] < 0: self.imu_weak_torso_position[2] = 0 # limit z coordinate to positive values\n self.imu_weak_torso_velocity[:] = self.imu_weak_torso_next_velocity * Robot.IMU_DECAY # stability tradeoff\n else:\n self.imu_weak_torso_velocity *= 0.97 # without visual updates for 0.2s, the position is locked, and the velocity decays to zero\n\n # convert proper acceleration to coordinate acceleration and fix rounding bias\n self.imu_weak_torso_acceleration = self.imu_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n self.imu_weak_torso_to_field_transform = 
Matrix_4x4.from_3x3_and_translation(self.imu_torso_to_field_rotation,self.imu_weak_torso_position)\n self.imu_weak_head_to_field_transform = self.imu_weak_torso_to_field_transform.multiply(self.body_parts[\"torso\"].transform.invert())\n self.imu_weak_field_to_head_transform = self.imu_weak_head_to_field_transform.invert()\n p = self.imu_weak_head_to_field_transform(self.rel_cart_CoM_position)\n self.imu_weak_CoM_velocity = (p-self.imu_weak_CoM_position)/Robot.STEPTIME\n self.imu_weak_CoM_position = p\n\n # Next Position = x0 + v0*t + 0.5*a*t^2, Next velocity = v0 + a*t\n self.imu_weak_torso_next_position = self.imu_weak_torso_position + self.imu_weak_torso_velocity * Robot.STEPTIME + self.imu_weak_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.imu_weak_torso_velocity + self.imu_weak_torso_acceleration * Robot.STEPTIME\n\n\n\n def set_joints_target_position_direct(self,indices,values:np.ndarray,harmonize=True,max_speed=7.03,tolerance=0.012,limit_joints=True) -> int:\n '''\n Computes the speed of a list of joints, taking as argument the target position\n\n Parameters\n ----------\n indices : `int`/`list`/`slice`/numpy array\n joint indices\n values : numpy array \n target position for each listed joint index\n harmonize : `bool`\n if True, all joints reach target at same time\n max_speed : `float`\n max. speed for all joints in deg/step\n Most joints have a maximum speed of 351.77 deg/s according to rcssserver3d/data/rsg/agent/nao/hingejoint.rsg\n That translates as 7.0354 deg/step or 6.1395 rad/s\n tolerance : `float`\n angle error tolerance (in degrees) to return that target was reached (returns -1)\n limit_joints : `bool`\n limit values to the joints' range of motion\n\n Returns\n -------\n remaining_steps : `int`\n predicted number of remaining steps or -1 if target was already reached\n\n Examples\n -------\n (let p[tx] be the joint position at t=x)\n\n Example for return value: moving joint[0] from 0deg to 10deg\n pos[t0]: 0, speed[t0]: 7deg/step, ret=2 # target will predictedly be reached in 2 steps\n pos[t1]: 7, speed[t1]: 3deg/step, ret=1 # target will predictedly be reached in 1 step (send final action)\n pos[t2]: 10, speed[t2]: 0deg/step, ret=0 # target was predictedly already reached \n pos[t3]: 10, speed[t3]: 0deg/step, ret=-1 # (best case scenario) server reported with delay, that target was reached (see tolerance)\n pos[t?]: 10, speed[t?]: 0deg/step, ret=-1 # if there is friction, it may take some additional steps \n\n If everything worked as predicted we could stop calling this function when ret==1\n If we need precision, it is recommended to wait for ret==-1\n\n Example 1:\n set_joints_target_position_direct(range(2,4),np.array([10.0,5.0]),harmonize=True) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=5, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=2.5, p[t2]=5\n\n Example 2:\n set_joints_target_position_direct([2,3],np.array([10.0,5.0]),harmonize=False) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=7, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=5, p[t2]=5 \n '''\n\n assert type(values) == np.ndarray, \"'values' argument must be a numpy array\"\n np.nan_to_num(values, copy=False) # Replace NaN with zero and infinity with large finite numbers\n\n # limit range of joints\n if limit_joints: \n if type(indices) == list or type(indices) == np.ndarray:\n for i in range(len(indices)):\n values[i] = np.clip(values[i], self.joints_info[indices[i]].min, self.joints_info[indices[i]].max)\n elif type(indices) == slice:\n info = 
self.joints_info[indices]\n for i in range(len(info)):\n values[i] = np.clip(values[i], info[i].min, info[i].max)\n else: # int\n values[0] = np.clip(values[0], self.joints_info[indices].min, self.joints_info[indices].max)\n\n #predicted_diff: predicted difference between reported position and actual position\n\n predicted_diff = self.joints_target_last_speed[indices] * 1.1459156 #rad/s to deg/step\n predicted_diff = np.asarray(predicted_diff)\n np.clip(predicted_diff,-7.03,7.03,out=predicted_diff) #saturate predicted movement in-place\n\n #reported_dist: difference between reported position and target position\n\n reported_dist = values - self.joints_position[indices]\n if np.all((np.abs(reported_dist) < tolerance)) and np.all((np.abs(predicted_diff) < tolerance)):\n self.joints_target_speed[indices] = 0\n return -1\n \n deg_per_step = reported_dist - predicted_diff\n\n relative_max = np.max( np.abs(deg_per_step) ) / max_speed\n remaining_steps = np.ceil( relative_max )\n\n if remaining_steps == 0:\n self.joints_target_speed[indices] = 0\n return 0\n\n if harmonize: \n deg_per_step /= remaining_steps\n else:\n np.clip(deg_per_step,-max_speed,max_speed,out=deg_per_step) #limit maximum speed\n\n self.joints_target_speed[indices] = deg_per_step * 0.87266463 #convert to rad/s\n\n return remaining_steps\n\n\n\n def get_command(self) -> bytes:\n '''\n Builds commands string from self.joints_target_speed\n '''\n j_speed = self.joints_target_speed * self.FIX_EFFECTOR_MASK #Fix symmetry issues 3/4 (effectors)\n cmd = \"\".join(f\"({self.joints_info[i].effector} {j_speed[i]:.5f})\" for i in range(self.no_of_joints)).encode('utf-8')\n\n self.joints_target_last_speed = self.joints_target_speed #1. both point to the same array\n self.joints_target_speed = np.zeros_like(self.joints_target_speed) #2. create new array for joints_target_speed\n return cmd" } ]
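The context snippets above document the Matrix_4x4 transform helper used throughout Robot.py and World.py. A minimal usage sketch, assuming the FCPCodebase sources are on the Python path (import path taken from the snippet metadata):

# Compose a translation with a 90-degree yaw and transform a point, using only
# methods shown in the Matrix_4x4 snippet above.
# Assumes the FCPCodebase repository is importable (math_ops/Matrix_4x4.py).
import numpy as np
from math_ops.Matrix_4x4 import Matrix_4x4

pose = Matrix_4x4.from_translation((1.0, 2.0, 0.0))   # identity rotation + translation
pose.rotate_z_deg(90, in_place=True)                  # post-multiply by Rz(90)

p_local = (1.0, 0.0, 0.0)          # point expressed in the rotated frame
p_parent = pose(p_local)           # __call__ multiplies by a 3D vector

print(pose.get_yaw_deg())          # ~90.0
print(np.round(p_parent, 3))       # [1. 3. 0.]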
from collections import deque from cpp.ball_predictor import ball_predictor from cpp.localization import localization from logs.Logger import Logger from math import atan2, pi from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Draw import Draw from world.commons.Other_Robot import Other_Robot from world.Robot import Robot import numpy as np
20401
---------- history_steps : int number of history steps to consider [1,20] Examples -------- get_ball_rel_vel(1) is equivalent to (current rel pos - last rel pos) / 0.04 get_ball_rel_vel(2) is equivalent to (current rel pos - rel pos 0.08s ago) / 0.08 get_ball_rel_vel(3) is equivalent to (current rel pos - rel pos 0.12s ago) / 0.12 ''' assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]" if len(self.ball_rel_torso_cart_pos_history) == 0: return np.zeros(3) h_step = min(history_steps, len(self.ball_rel_torso_cart_pos_history)) t = h_step * World.VISUALSTEP return (self.ball_rel_torso_cart_pos - self.ball_rel_torso_cart_pos_history[h_step-1]) / t def get_ball_abs_vel(self, history_steps:int): ''' Get ball absolute velocity (m/s) Parameters ---------- history_steps : int number of history steps to consider [1,20] Examples -------- get_ball_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04 get_ball_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08 get_ball_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12 ''' assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]" if len(self.ball_abs_pos_history) == 0: return np.zeros(3) h_step = min(history_steps, len(self.ball_abs_pos_history)) t = h_step * World.VISUALSTEP return (self.ball_abs_pos - self.ball_abs_pos_history[h_step-1]) / t def get_predicted_ball_pos(self, max_speed): ''' Get predicted 2D ball position when its predicted speed is equal to or less than `max_speed` In case that position exceeds the prediction horizon, the last available prediction is returned Parameters ---------- max_speed : float maximum speed at which the ball will be moving at returned future position ''' b_sp = self.ball_2d_pred_spd index = len(b_sp) - max( 1, np.searchsorted(b_sp[::-1], max_speed, side='right') ) return self.ball_2d_pred_pos[index] def get_intersection_point_with_ball(self, player_speed): ''' Get 2D intersection point with moving ball, based on `self.ball_2d_pred_pos` Parameters ---------- player_speed : float average speed at which the robot will chase the ball Returns ------- 2D intersection point : ndarray 2D intersection point with moving ball, assuming the robot moves at an avg. 
speed of `player_speed` intersection distance : float distance between current robot position and intersection point ''' params = np.array([*self.robot.loc_head_position[:2], player_speed*0.02, *self.ball_2d_pred_pos.flat], np.float32) pred_ret = ball_predictor.get_intersection(params) return pred_ret[:2], pred_ret[2] def update(self): r = self.robot PM = self.play_mode W = World # reset variables r.loc_is_up_to_date = False r.loc_head_z_is_up_to_date = False # update play mode groups if PM in (W.M_PLAY_ON, W.M_GAME_OVER): # most common group self.play_mode_group = W.MG_OTHER elif PM in (W.M_OUR_KICKOFF, W.M_OUR_KICK_IN, W.M_OUR_CORNER_KICK, W.M_OUR_GOAL_KICK, W.M_OUR_OFFSIDE, W.M_OUR_PASS, W.M_OUR_DIR_FREE_KICK, W.M_OUR_FREE_KICK): self.play_mode_group = W.MG_OUR_KICK elif PM in (W.M_THEIR_KICK_IN, W.M_THEIR_CORNER_KICK, W.M_THEIR_GOAL_KICK, W.M_THEIR_OFFSIDE, W.M_THEIR_PASS, W.M_THEIR_DIR_FREE_KICK, W.M_THEIR_FREE_KICK, W.M_THEIR_KICKOFF): self.play_mode_group = W.MG_THEIR_KICK elif PM in (W.M_BEFORE_KICKOFF, W.M_THEIR_GOAL): self.play_mode_group = W.MG_ACTIVE_BEAM elif PM in (W.M_OUR_GOAL,): self.play_mode_group = W.MG_PASSIVE_BEAM elif PM is not None: raise ValueError(f'Unexpected play mode ID: {PM}') r.update_pose() # update forward kinematics if self.ball_is_visible: # Compute ball position, relative to torso self.ball_rel_torso_cart_pos = r.head_to_body_part_transform("torso",self.ball_rel_head_cart_pos) if self.vision_is_up_to_date: # update vision based localization # Prepare all variables for localization feet_contact = np.zeros(6) lf_contact = r.frp.get('lf', None) rf_contact = r.frp.get('rf', None) if lf_contact is not None:
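The cropped snippet above is cut right before the gold line; its get_predicted_ball_pos() relies on the predicted speed array being monotonically decreasing, so searching the reversed (ascending) array finds the first step at which the ball has slowed to max_speed. A standalone sketch of that indexing with a hypothetical speed profile:

# Index selection from get_predicted_ball_pos(), reproduced on toy data.
import numpy as np

b_sp = np.array([2.0, 1.5, 1.0, 0.6, 0.3, 0.0])   # hypothetical predicted speeds (m/s)
max_speed = 0.5

# searchsorted on the ascending (reversed) array counts predictions with speed <= max_speed;
# the max(1, ...) guard falls back to the last prediction if none qualifies.
index = len(b_sp) - max(1, np.searchsorted(b_sp[::-1], max_speed, side='right'))
print(index, b_sp[index])                          # 4 0.3 -> first step with speed <= max_speed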
class World(): STEPTIME = 0.02 # Fixed step time STEPTIME_MS = 20 # Fixed step time in milliseconds VISUALSTEP = 0.04 # Fixed visual step time VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds # play modes in our favor M_OUR_KICKOFF = 0 M_OUR_KICK_IN = 1 M_OUR_CORNER_KICK = 2 M_OUR_GOAL_KICK = 3 M_OUR_FREE_KICK = 4 M_OUR_PASS = 5 M_OUR_DIR_FREE_KICK = 6 M_OUR_GOAL = 7 M_OUR_OFFSIDE = 8 # play modes in their favor M_THEIR_KICKOFF = 9 M_THEIR_KICK_IN = 10 M_THEIR_CORNER_KICK = 11 M_THEIR_GOAL_KICK = 12 M_THEIR_FREE_KICK = 13 M_THEIR_PASS = 14 M_THEIR_DIR_FREE_KICK = 15 M_THEIR_GOAL = 16 M_THEIR_OFFSIDE = 17 # neutral play modes M_BEFORE_KICKOFF = 18 M_GAME_OVER = 19 M_PLAY_ON = 20 # play mode groups MG_OUR_KICK = 0 MG_THEIR_KICK = 1 MG_ACTIVE_BEAM = 2 MG_PASSIVE_BEAM = 3 MG_OTHER = 4 # play on, game over FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0)) FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8)) def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool, enable_draw:bool, logger:Logger, host:str) -> None: self.team_name = team_name # Name of our team self.team_name_opponent : str = None # Name of opponent team self.apply_play_mode_correction = apply_play_mode_correction # True to adjust ball position according to play mode self.step = 0 # Total number of received simulation steps (always in sync with self.time_local_ms) self.time_server = 0.0 # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents) self.time_local_ms = 0 # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message) self.time_game = 0.0 # Game time, in seconds, as indicated by the server self.goals_scored = 0 # Goals score by our team self.goals_conceded = 0 # Goals conceded by our team self.team_side_is_left : bool = None # True if our team plays on the left side (this value is later changed by the world parser) self.play_mode = None # Play mode of the soccer game, provided by the server self.play_mode_group = None # Certain play modes share characteristics, so it makes sense to group them self.flags_corners : dict = None # corner flags, key=(x,y,z), always assume we play on the left side self.flags_posts : dict = None # goal posts, key=(x,y,z), always assume we play on the left side self.ball_rel_head_sph_pos = np.zeros(3) # Ball position relative to head (spherical coordinates) (m, deg, deg) self.ball_rel_head_cart_pos = np.zeros(3) # Ball position relative to head (cartesian coordinates) (m) self.ball_rel_torso_cart_pos = np.zeros(3) # Ball position relative to torso (cartesian coordinates) (m) self.ball_rel_torso_cart_pos_history = deque(maxlen=20) # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos = np.zeros(3) # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m) self.ball_abs_pos_history = deque(maxlen=20) # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos_last_update = 0 # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio self.ball_abs_vel = np.zeros(3) # Ball velocity vector based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel) self.ball_abs_speed = 0 # 
Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||) self.ball_is_visible = False # True if the last server message contained vision information related to the ball self.is_ball_abs_pos_from_vision = False # True if ball_abs_pos originated from vision, False if it originated from radio self.ball_last_seen = 0 # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update) self.ball_cheat_abs_pos = np.zeros(3) # Absolute ball position provided by the server as cheat (m) self.ball_cheat_abs_vel = np.zeros(3) # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s) self.ball_2d_pred_pos = np.zeros((1,2)) # prediction of current and future 2D ball positions* self.ball_2d_pred_vel = np.zeros((1,2)) # prediction of current and future 2D ball velocities* self.ball_2d_pred_spd = np.zeros(1) # prediction of current and future 2D ball linear speeds* # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction) self.lines = np.zeros((30,6)) # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg) self.line_count = 0 # Number of visible lines self.vision_last_update = 0 # World.time_local_ms when last vision update was received self.vision_is_up_to_date = False # True if the last server message contained vision information self.teammates = [Other_Robot(i, True ) for i in range(1,12)] # List of teammates, ordered by unum self.opponents = [Other_Robot(i, False) for i in range(1,12)] # List of opponents, ordered by unum self.teammates[unum-1].is_self = True # This teammate is self self.draw = Draw(enable_draw, unum, host, 32769) # Draw object for current player self.team_draw = Draw(enable_draw, 0, host, 32769) # Draw object shared with teammates self.logger = logger self.robot = Robot(unum, robot_type) def log(self, msg:str): ''' Shortcut for: self.logger.write(msg, True, self.step) Parameters ---------- msg : str message to be written after the simulation step ''' self.logger.write(msg, True, self.step) def get_ball_rel_vel(self, history_steps:int): ''' Get ball velocity, relative to torso (m/s) Parameters ---------- history_steps : int number of history steps to consider [1,20] Examples -------- get_ball_rel_vel(1) is equivalent to (current rel pos - last rel pos) / 0.04 get_ball_rel_vel(2) is equivalent to (current rel pos - rel pos 0.08s ago) / 0.08 get_ball_rel_vel(3) is equivalent to (current rel pos - rel pos 0.12s ago) / 0.12 ''' assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]" if len(self.ball_rel_torso_cart_pos_history) == 0: return np.zeros(3) h_step = min(history_steps, len(self.ball_rel_torso_cart_pos_history)) t = h_step * World.VISUALSTEP return (self.ball_rel_torso_cart_pos - self.ball_rel_torso_cart_pos_history[h_step-1]) / t def get_ball_abs_vel(self, history_steps:int): ''' Get ball absolute velocity (m/s) Parameters ---------- history_steps : int number of history steps to consider [1,20] Examples -------- get_ball_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04 get_ball_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08 get_ball_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12 ''' assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]" if len(self.ball_abs_pos_history) == 0: return np.zeros(3) 
h_step = min(history_steps, len(self.ball_abs_pos_history)) t = h_step * World.VISUALSTEP return (self.ball_abs_pos - self.ball_abs_pos_history[h_step-1]) / t def get_predicted_ball_pos(self, max_speed): ''' Get predicted 2D ball position when its predicted speed is equal to or less than `max_speed` In case that position exceeds the prediction horizon, the last available prediction is returned Parameters ---------- max_speed : float maximum speed at which the ball will be moving at returned future position ''' b_sp = self.ball_2d_pred_spd index = len(b_sp) - max( 1, np.searchsorted(b_sp[::-1], max_speed, side='right') ) return self.ball_2d_pred_pos[index] def get_intersection_point_with_ball(self, player_speed): ''' Get 2D intersection point with moving ball, based on `self.ball_2d_pred_pos` Parameters ---------- player_speed : float average speed at which the robot will chase the ball Returns ------- 2D intersection point : ndarray 2D intersection point with moving ball, assuming the robot moves at an avg. speed of `player_speed` intersection distance : float distance between current robot position and intersection point ''' params = np.array([*self.robot.loc_head_position[:2], player_speed*0.02, *self.ball_2d_pred_pos.flat], np.float32) pred_ret = ball_predictor.get_intersection(params) return pred_ret[:2], pred_ret[2] def update(self): r = self.robot PM = self.play_mode W = World # reset variables r.loc_is_up_to_date = False r.loc_head_z_is_up_to_date = False # update play mode groups if PM in (W.M_PLAY_ON, W.M_GAME_OVER): # most common group self.play_mode_group = W.MG_OTHER elif PM in (W.M_OUR_KICKOFF, W.M_OUR_KICK_IN, W.M_OUR_CORNER_KICK, W.M_OUR_GOAL_KICK, W.M_OUR_OFFSIDE, W.M_OUR_PASS, W.M_OUR_DIR_FREE_KICK, W.M_OUR_FREE_KICK): self.play_mode_group = W.MG_OUR_KICK elif PM in (W.M_THEIR_KICK_IN, W.M_THEIR_CORNER_KICK, W.M_THEIR_GOAL_KICK, W.M_THEIR_OFFSIDE, W.M_THEIR_PASS, W.M_THEIR_DIR_FREE_KICK, W.M_THEIR_FREE_KICK, W.M_THEIR_KICKOFF): self.play_mode_group = W.MG_THEIR_KICK elif PM in (W.M_BEFORE_KICKOFF, W.M_THEIR_GOAL): self.play_mode_group = W.MG_ACTIVE_BEAM elif PM in (W.M_OUR_GOAL,): self.play_mode_group = W.MG_PASSIVE_BEAM elif PM is not None: raise ValueError(f'Unexpected play mode ID: {PM}') r.update_pose() # update forward kinematics if self.ball_is_visible: # Compute ball position, relative to torso self.ball_rel_torso_cart_pos = r.head_to_body_part_transform("torso",self.ball_rel_head_cart_pos) if self.vision_is_up_to_date: # update vision based localization # Prepare all variables for localization feet_contact = np.zeros(6) lf_contact = r.frp.get('lf', None) rf_contact = r.frp.get('rf', None) if lf_contact is not None:
feet_contact[0:3] = Matrix_4x4( r.body_parts["lfoot"].transform ).translate( lf_contact[0:3] , True).get_translation()
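The gold next_line above composes the foot's 4x4 homogeneous transform with a contact point given in foot coordinates and reads back the resulting translation. The Matrix_4x4 helper itself is not included in this excerpt, so the snippet below is only a minimal NumPy sketch of that operation under the standard homogeneous-coordinates convention; the matrix and point values are made up for illustration.

import numpy as np

def transform_point(T: np.ndarray, p_local: np.ndarray) -> np.ndarray:
    """Apply a 4x4 homogeneous transform T to a 3D point given in local coordinates."""
    p_h = np.append(p_local, 1.0)      # promote to homogeneous coordinates
    return (T @ p_h)[:3]               # rotate + translate, drop the w component

# Example: a transform with a 90-degree yaw and a translation of (0.10, 0.00, -0.05)
T_foot = np.array([[0., -1., 0.,  0.10],
                   [1.,  0., 0.,  0.00],
                   [0.,  0., 1., -0.05],
                   [0.,  0., 0.,  1.00]])
contact_local = np.array([0.02, 0.01, 0.0])    # hypothetical contact point in foot coordinates
print(transform_point(T_foot, contact_local))  # -> [ 0.09  0.02 -0.05]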
1
2023-12-16 23:40:23+00:00
24k
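The World class in this row estimates ball velocity by finite differences over its 0.04 s visual step, keeping a deque of past positions (see get_ball_abs_vel above). The following is a stripped-down sketch of that pattern; the BallTracker class and its push method are illustrative stand-ins, not part of the agent's actual API.

from collections import deque
import numpy as np

VISUALSTEP = 0.04  # seconds between vision updates, as in the class constants above

class BallTracker:
    def __init__(self):
        self.abs_pos = np.zeros(3)
        self.history = deque(maxlen=20)   # index 0 = previous position

    def push(self, new_pos):
        self.history.appendleft(self.abs_pos.copy())
        self.abs_pos = np.asarray(new_pos, dtype=float)

    def abs_vel(self, history_steps: int) -> np.ndarray:
        """(current pos - pos history_steps*0.04s ago) / elapsed time, like get_ball_abs_vel."""
        assert 1 <= history_steps <= 20
        if not self.history:
            return np.zeros(3)
        h = min(history_steps, len(self.history))
        return (self.abs_pos - self.history[h - 1]) / (h * VISUALSTEP)

tracker = BallTracker()
for pos in [(0.0, 0.0, 0.04), (0.1, 0.0, 0.04), (0.2, 0.0, 0.04)]:
    tracker.push(pos)
print(tracker.abs_vel(1))   # ~[2.5, 0, 0] m/s for a ball moving 0.1 m per visual step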
Sam-Izdat/tinycio
src/tinycio/balance.py
[ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n cs_in = ColorSpace.Variant.SRGB_LIN\n cs_out = ColorSpace.Variant.OKLAB\n oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)\n \"\"\"\n class Variant(IntEnum):\n \"\"\"\n Color space enum. For a list of available options, see :ref:`ref_color_spaces`.\n \"\"\"\n UNKNOWN = 1<<0 \n NONCOLOR = 1<<1 \n CIE_XYZ = 1<<2 \n CIE_XYY = 1<<3 \n SRGB = 1<<4 \n SRGB_LIN = 1<<5 \n REC709 = 1<<6 \n REC2020 = 1<<7 \n REC2020_LIN = 1<<8 \n DCI_P3 = 1<<9 \n DCI_P3_LIN = 1<<10 \n DISPLAY_P3 = 1<<11 \n ACESCG = 1<<12 \n ACESCC = 1<<13 \n ACESCCT = 1<<14 \n ACES2065_1 = 1<<15 \n LMS = 1<<16 \n OKLAB = 1<<17 \n CIELAB = 1<<18 \n CIELUV = 1<<19 \n HSV = 1<<20 \n HSL = 1<<21 \n OKHSV = 1<<22\n OKHSL = 1<<23\n\n SCENE_LINEAR = SRGB_LIN | REC2020_LIN | DCI_P3_LIN | ACESCG | ACES2065_1 | CIE_XYZ\n PERCEPTUAL = OKLAB | CIELAB | CIELUV | OKHSL | OKHSV\n CYLINDRICAL = HSL | HSV | OKHSL | OKHSV\n\n GAMUT_SRGB = SRGB | SRGB_LIN | REC709 | HSL | HSV\n GAMUT_AP0 = ACES2065_1\n GAMUT_AP1 = ACESCG | ACESCC | ACESCCT\n GAMUT_REC2020 = REC2020 | REC2020_LIN\n GAMUT_DCI_P3 = DCI_P3 | DCI_P3_LIN\n GAMUT_DISPLAY_P3= DISPLAY_P3\n GAMUT_OKLAB = OKLAB | OKHSL | OKHSV\n GAMUT_CIE_XYZ = CIE_XYZ | CIE_XYY\n GAMUT_CIELAB = CIELAB\n GAMUT_CIELUV = CIELUV\n GAMUT_OTHER = LMS | UNKNOWN | NONCOLOR\n\n WP_D65 = SRGB | SRGB_LIN | REC709 | DISPLAY_P3 | REC2020 | REC2020_LIN | CIE_XYZ | CIE_XYY\n WP_CCT_6300 = DCI_P3 | DCI_P3_LIN\n WP_CCT_6000 = ACESCG | ACESCC | ACESCCT | ACES2065_1\n\n MODEL_RGB = SRGB | SRGB_LIN | REC709 | REC2020 | REC2020_LIN | DCI_P3 | DCI_P3_LIN | DISPLAY_P3 | \\\n ACESCG | ACESCC | ACESCCT | ACES2065_1\n MODEL_CIE = CIE_XYZ | CIE_XYY | CIELAB | CIELUV\n MODEL_CAM = 0\n MODEL_YUV = 0\n MODEL_OTHER = LMS | HSL | HSV | OKLAB # is OKLAB CAM-based?\n \n NEGATIVE = OKLAB | CIELAB | CIELUV | GAMUT_AP0\n NON_NEGATIVE = ~NEGATIVE\n\n DISABLED = CIELUV\n UNSUPPORTED = OKHSV | OKHSL # disabled doesn't go here - CS must have alternate path\n SUPPORTED = ~UNSUPPORTED \n\n # FIXME: LUV doesn't quite match expected values, needs further testing\n\n mat_xyz_to_srgb = [\n [3.24096994190452134, -1.53738317757009346, -0.498610760293003284],\n [-0.969243636280879826, 1.87596750150772067, 0.0415550574071756125],\n [0.0556300796969936084, -0.203976958888976564, 1.05697151424287856]]\n\n mat_srgb_to_xyz = [\n [0.412390799265959481, 0.357584339383877964, 0.180480788401834288],\n [0.212639005871510358, 0.715168678767755927, 0.072192315360733715],\n [0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]\n\n mat_srgb_to_acescg = [\n [ 0.6130974024, 0.3395231462, 0.04737945141],\n [ 0.07019372247, 0.916353879, 0.01345239847],\n [ 0.02061559288, 0.1095697729, 0.8698146341]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_acescg_to_srgb = [\n [ 1.705050993, -0.6217921206,-0.083258872],\n [-0.1302564175, 1.140804737, -0.01054831907],\n [-0.02400335681,-0.1289689761, 1.152972333]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_srgb_to_aces2065_1 = [\n [ 0.439632982, 0.382988698, 0.17737832],\n [ 0.0897764431, 0.813439429, 0.0967841284],\n [ 0.0175411704, 0.111546553, 0.870912277]]\n\n mat_aces2065_1_to_srgb = [\n [ 2.52168619, -1.13413099, -0.387555198],\n 
[-0.276479914, 1.37271909, -0.0962391736],\n [-0.015378065, -0.152975336, 1.1683534]]\n\n mat_srgb_to_displayp3 = [\n [ 0.822461969, 0.177538031, 1.15772692e-10],\n [ 0.0331941989, 0.966805801, 1.95085037e-11],\n [ 0.0170826307, 0.0723974405, 0.910519929]]\n\n mat_displayp3_to_srgb = [\n [ 1.22494018, -0.224940176, -4.77534979e-11],\n [-0.0420569547, 1.04205695, 3.37864801e-11],\n [-0.0196375546,-0.0786360454, 1.0982736]] \n\n # NOTE: No chromatic adaptation\n mat_srgb_to_dcip3 = [\n [0.868579739716132409, 0.128919138460847047, 0.00250112182302054368],\n [0.0345404102543194426, 0.961811386361919975, 0.0036482033837605824],\n [0.0167714290414502718, 0.0710399977868858352, 0.912188573171663893]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_srgb = [\n [ 1.15751640619975871, -0.154962378073857756, -0.00255402812590095854],\n [-0.0415000715306859699, 1.04556792307969925, -0.00406785154901328463],\n [-0.0180500389562539583,-0.0785782726530290654, 1.09662831160928302]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_xyz = [\n [ 0.445169815564552417, 0.277134409206777664, 0.172282669815564564],\n [ 0.209491677912730539, 0.721595254161043636, 0.0689130679262258258],\n [-3.63410131696985616e-17, 0.0470605600539811521, 0.907355394361973415]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_dcip3 = [\n [2.7253940304917328, -1.01800300622718496, -0.440163195190036463],\n [-0.795168025808764195, 1.689732054843624, 0.0226471906084774533],\n [0.0412418913957000325, -0.0876390192158623825, 1.10092937864632191]]\n\n mat_srgb_to_rec2020 = [\n [ 0.627403896, 0.329283039, 0.0433130657],\n [ 0.0690972894, 0.919540395, 0.0113623156],\n [ 0.0163914389, 0.0880133077, 0.895595253]]\n\n mat_rec2020_to_srgb = [\n [ 1.660491, -0.587641139,-0.0728498633],\n [-0.124550475, 1.1328999, -0.00834942258],\n [-0.0181507633,-0.100578898, 1.11872966]]\n\n mat_rec2020_to_xyz = [\n [0.636958048301291, 0.144616903586208, 0.168880975164172],\n [0.262700212011267, 0.677998071518871, 0.059301716469862],\n [4.99410657446607e-17, 0.0280726930490874, 1.06098505771079]]\n\n mat_xyz_to_rec2020 = [\n [1.71665118797127, -0.355670783776393, -0.25336628137366],\n [-0.666684351832489, 1.61648123663494, 0.0157685458139111],\n [0.0176398574453108, -0.0427706132578085, 0.942103121235474]]\n\n # NOTE: No chromatic adaptation\n mat_acescg_to_xyz = [\n [ 0.66245418, 0.13400421, 0.15618769],\n [ 0.27222872, 0.67408177, 0.05368952],\n [-0.00557465, 0.00406073, 1.0103391 ]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_acescg = [\n [ 1.64102338, -0.32480329, -0.2364247 ],\n [-0.66366286, 1.61533159, 0.01675635],\n [ 0.01172189, -0.00828444, 0.98839486]]\n\n # NOTE: For CIE XYZ color\n mat_d60_to_d65 = [\n [ 0.98722400,-0.00611327, 0.01595330],\n [-0.00759836, 1.00186000, 0.00533002],\n [ 0.00307257,-0.00509595, 1.08168000]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_d60 = [\n [ 1.01303000, 0.00610531,-0.01497100],\n [ 0.00769823, 0.99816500,-0.00503203],\n [-0.00284131, 0.00468516, 0.92450700]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_dci = [\n [0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],\n [-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],\n [-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]\n \n # NOTE: For CIE XYZ color\n mat_dci_to_d65 = [\n [1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],\n [0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],\n [0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]\n\n mat_xyz_to_lms 
= [\n [ 0.8951, 0.2664,-0.1614],\n [-0.7502, 1.7135, 0.0367],\n [ 0.0389,-0.0685, 1.0296]]\n\n mat_lms_to_xyz = [\n [ 0.986993, -0.147054, 0.159963],\n [ 0.432305, 0.51836, 0.0492912],\n [ -0.00852866, 0.0400428, 0.968487]]\n\n # OKLAB's XYZ to LMS\n mat_oklab_m1 = [\n [ 0.8189330101, 0.3618667424, -0.1288597137],\n [ 0.0329845436, 0.9293118715, 0.0361456387],\n [ 0.0482003018, 0.2643662691, 0.6338517070]]\n\n # OKLAB's non-linear L'M'S' to OKLAB\n mat_oklab_m2 = [\n [ 0.2104542553, 0.7936177850, -0.0040720468],\n [ 1.9779984951, -2.4285922050, 0.4505937099],\n [ 0.0259040371, 0.7827717662, -0.8086757660]]\n\n # Inverse of OKLAB M1\n mat_oklab_m1_inv = [\n [ 1.22701385, -0.55779998, 0.28125615],\n [-0.04058018, 1.11225687, -0.07167668],\n [-0.07638128, -0.42148198, 1.58616322]]\n\n # Inverse of OKLAB M2\n mat_oklab_m2_inv = [\n [ 1. , 0.39633779, 0.21580376],\n [ 1.00000001, -0.10556134, -0.06385417],\n [ 1.00000005, -0.08948418, -1.29148554]]\n\n @classmethod\n def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:\n \"\"\"\n Change the color space of an image. Cylindrical transformations HSV/HSL are \n treated as their own color spaces and assumed to be relative to sRGB linear. \n Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.\n\n .. warning::\n\n Tone mapping is not included, so converting the color space of HDR values to \n an LDR-designated color space will not automatically reduce dynamic range. For example, \n taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB \n gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.\n\n .. warning::\n\n Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range \n (or equivalent). 
This is not strictly enforced but input outside this range may yield \n unpredictable results or *NaN* values.\n\n :param im: [C=3, H, W] image tensor \n :type im: torch.Tensor | ColorImage\n :param source: color space to convert from\n :param destination: color space to convert to\n :return: image tensor in designated color space\n \"\"\"\n ip, op = source, destination\n cs = cls.Variant\n tf = TransferFunction\n if ip == op: return im\n\n assert im.dim() == 3 and im.size(0) == 3, f\"expected [C=3, H, W] image tensor, got {im.size()}\"\n assert source != 0, f\"Unknown source color space\"\n assert ip & cs.SUPPORTED, f\"Source color space not supported: {source.name}\"\n assert op & cs.SUPPORTED, f\"Destination color space not supported: {destination.name}\"\n assert ip & ~cs.DISABLED, f\"Source color space disabled: {ColorSpace.Variant(ip).name}\"\n assert op & ~cs.DISABLED, f\"Destination color space disabled: {ColorSpace.Variant(op).name}\"\n\n err_not_implemented = f\"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}\" \n\n # Direct path where it matters, loop-de-loop elsewhere\n if ip == cs.SRGB_LIN:\n if op == cs.SRGB: im = tf.srgb_oetf(im)\n elif op == cs.REC709: im = tf.rec709_oetf(im)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020))\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_srgb_to_rec2020)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.DCI_P3_LIN: im = mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DISPLAY_P3: im = tf.srgb_oetf(mm(im, cls.mat_srgb_to_displayp3))\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_srgb_to_xyz)\n elif op == cs.CIE_XYY: im = cls._xyz_to_xyy(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.LMS: im = cls._xyz_to_lms(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.ACESCG: im = mm(im, cls.mat_srgb_to_acescg)\n elif op == cs.ACESCC: im = cls._acescg_to_acescc(mm(im, cls.mat_srgb_to_acescg))\n elif op == cs.ACES2065_1: im = mm(im, cls.mat_srgb_to_aces2065_1)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.OKLAB: im = cls._rgb_to_oklab(im)\n elif op == cs.HSL: im = cls._rgb_to_hsl(tf.srgb_oetf(im))\n elif op == cs.HSV: im = cls._rgb_to_hsv(tf.srgb_oetf(im))\n else: raise Exception(err_not_implemented)\n elif ip == cs.SRGB:\n if op == cs.HSL: im = cls._rgb_to_hsl(im)\n elif op == cs.HSV: im = cls._rgb_to_hsv(im)\n else: im = cls.convert(tf.srgb_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC709: im = cls.convert(tf.rec709_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC2020: \n if op == cs.REC2020_LIN: im = tf.rec2020_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.REC2020_LIN: \n if op == cs.REC2020: im = tf.rec2020_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(im, cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(im, cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.DCI_P3: \n if op == cs.DCI_P3_LIN: im = tf.dcip3_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = 
cls.convert(mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DCI_P3_LIN: \n if op == cs.DCI_P3: im = tf.dcip3_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = cls.convert(mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DISPLAY_P3: im = cls.convert(mm(tf.srgb_eotf(im), cls.mat_displayp3_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYZ:\n if op == cs.CIE_XYY: im = cls._xyz_to_xyy(im)\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_xyz_to_rec2020)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_xyz_to_rec2020))\n elif op == cs.DCI_P3_LIN: im = mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.LMS: im = cls._xyz_to_lms(im)\n elif op == cs.ACESCG: im = mm(cls._d65_to_d60(im), cls.mat_xyz_to_acescg)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(im)\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(im)\n elif op == cs.OKLAB: im = cls._xyz_to_oklab(im)\n else: im = cls.convert(mm(im, cls.mat_xyz_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYY: \n if op == cs.CIE_XYZ: im = cls._xyy_to_xyz(im)\n else: im = cls.convert(cls._xyy_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.LMS: \n if op == cs.CIE_XYZ: im = cls._lms_to_xyz(im)\n else: im = cls.convert(cls._lms_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.ACESCG:\n # if op == cs.CIE_XYZ: im = cls._d60_to_d65(mm(im, cls.mat_acescg_to_xyz)) # FIXME: fails unit test (?)\n if op == cs.ACESCC: im = cls._acescg_to_acescc(im)\n else: im = cls.convert(mm(im, cls.mat_acescg_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.ACESCC:\n if op == cs.ACESCG: im = cls._acescc_to_acescg(im)\n else: im = cls.convert(cls._acescc_to_acescg(im), cs.ACESCG, op)\n elif ip == cs.ACES2065_1: im = cls.convert(mm(im, cls.mat_aces2065_1_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.HSL:\n if op == cs.SRGB: im = cls._hsl_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsl_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.HSV:\n if op == cs.SRGB: im = cls._hsv_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsv_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.CIELAB: im = cls.convert(cls._cielab_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.CIELUV: im = cls.convert(cls._cieluv_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.OKLAB:\n if op == cs.CIE_XYZ: im = cls._oklab_to_xyz(im)\n else: im = cls.convert(cls._oklab_to_rgb(im), cs.SRGB_LIN, op)\n else: raise Exception(err_not_implemented)\n\n return im\n\n @classmethod\n def _xyz_to_xyy(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to CIE xyY color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIE xyY color space tensor\n \"\"\"\n X = xyz[0:1]\n Y = xyz[1:2]\n Z = xyz[2:3]\n x = X / (X + Y + Z)\n y = Y / (X + Y + Z)\n return torch.cat([x, y, Y], dim=0)\n\n @classmethod\n def _xyy_to_xyz(cls, xyy:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE xyY color space to CIE XYZ color space.\n\n :param xyy: Input CIE xyY color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n x = xyy[0:1]\n y = xyy[1:2]\n Y = xyy[2:3]\n X = (Y / y) * x\n Z = (Y / y) * (1. 
- x - y)\n return torch.cat([X, Y, Z], dim=0)\n\n @classmethod\n def _xyz_to_lms(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to LMS color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: LMS color space tensor\n \"\"\"\n return mm(xyz, cls.mat_xyz_to_lms)\n\n @classmethod\n def _lms_to_xyz(cls, lms:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert LMS color space to CIE XYZ color space.\n\n :param lms: Input LMS color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n return mm(lms, cls.mat_lms_to_xyz)\n\n @classmethod\n def _acescg_to_acescc(cls, cg:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert scene-linear ACEScg to log ACEScc.\n\n :param lms: Input ACEScg color space tensor\n :return: ACEScc color space tensor\n \"\"\"\n res = torch.where(cg < 0.00003051757, \n (torch.log2(0.00001525878 + cg * 0.5) + 9.72) / 17.52, \n (torch.log2(cg) + 9.72) / 17.52)\n return res\n\n @classmethod\n def _acescc_to_acescg(cls, cc:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert log ACEScc to scene-linear ACEScg.\n\n :param lms: Input ACEScc color space tensor\n :return: ACEScg color space tensor\n \"\"\"\n res = torch.where(cc < -0.3013698630, \n (torch.exp2(cc * 17.52 - 9.72) - 0.00001525878) * 2,\n torch.exp2(cc * 17.52 - 9.72))\n return res\n\n @classmethod\n def _xyz_to_oklab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to OKLAB color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: OKLAB color space tensor\n \"\"\" \n lms = mm(xyz, cls.mat_oklab_m1)\n lms_p = torch.pow(torch.abs(lms), 0.3333333333) * torch.sign(lms).float()\n lab = mm(lms_p, cls.mat_oklab_m2)\n return lab\n\n @classmethod\n def _oklab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert OKLAB color space to CIE XYZ color space.\n\n :param lab: Input OKLAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n lms_p = mm(lab, cls.mat_oklab_m2_inv)\n lms = torch.pow(lms_p, 3.)\n xyz = mm(lms, cls.mat_oklab_m1_inv)\n return xyz\n\n\n @classmethod\n def __pivot_xyz_to_lab(cls, val): \n return torch.where(val > 0.008856, torch.pow(val, 0.3333333333), ((val * 903.3) + 16.0) / 116.0)\n\n @classmethod\n def _xyz_to_cielab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIE XYZ to CIELAB.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIELAB color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE. \n x = xyz[0:1] / 0.95047 \n y = xyz[1:2] / 1.00000 \n z = xyz[2:3] / 1.08883 \n\n x = cls.__pivot_xyz_to_lab(x)\n y = cls.__pivot_xyz_to_lab(y)\n z = cls.__pivot_xyz_to_lab(z)\n\n l = torch.maximum(torch.zeros_like(y).to(y.device), (116.0 * y) - 16.0)\n a = (x - y) * 500.0\n b = (y - z) * 200.0\n return torch.cat([l, a, b], dim=0)\n\n @classmethod\n def _cielab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIELAB to CIE XYZ.\n \n .. note::\n\n Assumes D65 standard illuminant.\n\n :param lab: Input CIELAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n l = lab[0:1]\n a = lab[1:2]\n b = lab[2:3]\n\n # Reminder: The y values is calculated first as it can be reused\n # for the calculation of x and z.\n y = (l + 16.0) / 116.0\n x = y + (a / 500.0)\n z = y - (b / 200.0)\n\n x3 = x * x * x\n z3 = z * z * z\n y3 = y * y * y\n\n x = torch.where(x3 > 0.008856, x3, ((x * 116.0) - 16.0) / 903.3)\n y = torch.where(l > 7.9996248, y3, l / 903.3)\n z = torch.where(z3 > 0.008856, z3, ((z * 116.0) - 16.0) / 903.3)\n\n x = x * 0.95047 \n y = y * 1.00000 \n z = z * 1.08883\n\n return torch.cat([x, y, z], dim=0)\n\n def _xyz_to_cieluv(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIE XYZ to CIELUV. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n small_L = (29. / 3) ** 3 * image[1]\n large_L = 116 * torch.pow(image[1], 1 / 3.) - 16\n L = torch.where(image[1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[0] + 15 * image[1] + 3 * image[2])\n u_prime = torch.where(denom != 0., 4 * image[0] / denom, 0.)\n v_prime = torch.where(denom != 0., 9 * image[1] / denom, 0.)\n d = 0\n elif len(image.size()) == 4:\n small_L = (29. / 3) ** 3 * image[:, 1]\n large_L = 116 * torch.pow(image[:, 1], 1 / 3.) - 16\n L = torch.where(image[:, 1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[:, 0] + 15 * image[:, 1] + 3 * image[:, 2])\n u_prime = torch.where(denom > 0., 4 * image[:, 0] / denom, 0.)\n v_prime = torch.where(denom > 0., 9 * image[:, 1] / denom, 0.)\n d = 1\n\n u = 13 * L * (u_prime - .2009)\n v = 13 * L * (v_prime - .4610)\n\n luv_image = torch.stack((L, u, v), dim=d)\n\n return luv_image\n\n def _cieluv_to_xyz(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIELUV to CIE XYZ. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n denom = (13 * image[0])\n u_prime = torch.where(denom != 0., image[1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[2] / denom, 0.) + .4610\n\n small_Y = image[0] * (3. / 29) ** 3\n large_Y = ((image[0] + 16.) / 116.) ** 3\n\n Y = torch.where(image[0] <= 8, small_Y, large_Y)\n d = 0\n # batch of images\n elif len(image.size()) == 4:\n denom = (13 * image[:, 0])\n u_prime = torch.where(denom != 0., image[:, 1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[:, 2] / denom, 0.) + .4610\n\n small_Y = image[:, 0] * (3. / 29) ** 3\n large_Y = ((image[:, 0] + 16.) / 116.) 
** 3\n\n Y = torch.where(image[:, 0] <= 8, small_Y, large_Y)\n d = 1\n\n X = torch.where(v_prime != 0., Y * 9 * u_prime / (4 * v_prime), 0.)\n Z = torch.where(v_prime != 0., Y * (12 - 3 * u_prime - 20 * v_prime) / (4 * v_prime), 0.)\n\n xyz_image = torch.stack((X, Y, Z), dim=d)\n\n return xyz_image\n\n @classmethod\n def _rgb_to_oklab(cls, rgb:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from linear sRGB to OKLAB.\n\n :param rgb: Input linear sRGB color space tensor\n :return: OKLAB color space tensor\n \"\"\"\n cr = rgb[0:1]\n cg = rgb[1:2]\n cb = rgb[2:3]\n\n l = 0.4122214708 * cr + 0.5363325363 * cg + 0.0514459929 * cb;\n m = 0.2119034982 * cr + 0.6806995451 * cg + 0.1073969566 * cb;\n s = 0.0883024619 * cr + 0.2817188376 * cg + 0.6299787005 * cb;\n\n l_ = torch.pow(torch.abs(l), 0.3333333333) * torch.sign(l).float()\n m_ = torch.pow(torch.abs(m), 0.3333333333) * torch.sign(m).float()\n s_ = torch.pow(torch.abs(s), 0.3333333333) * torch.sign(s).float()\n\n return torch.cat([\n 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_,\n 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_,\n 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_], dim=0)\n\n @classmethod\n def _oklab_to_rgb(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from OKLAB to linear sRGB.\n\n :param lab: Input OKLAB color space tensor\n :return: Linear sRGB color space tensor\n \"\"\"\n cl = lab[0:1]\n ca = lab[1:2]\n cb = lab[2:3]\n\n l_ = cl + 0.3963377774 * ca + 0.2158037573 * cb\n m_ = cl - 0.1055613458 * ca - 0.0638541728 * cb\n s_ = cl - 0.0894841775 * ca - 1.2914855480 * cb\n\n l = l_*l_*l_\n m = m_*m_*m_\n s = s_*s_*s_\n\n return torch.cat([\n +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,\n -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,\n -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s], dim=0)\n\n @classmethod\n def _rgb_to_hsl(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSL. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n :param rgb: Input sRGB image tensor\n :return: HSL image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsl_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsl_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsl_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsl_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsl_h[cmax_idx == 3] = 0.\n hsl_h /= 6.\n\n hsl_l = (cmax + cmin) / 2.\n hsl_s = torch.empty_like(hsl_h)\n hsl_s[hsl_l == 0] = 0\n hsl_s[hsl_l == 1] = 0\n hsl_l_ma = torch.bitwise_and(hsl_l > 0, hsl_l < 1)\n hsl_l_s0_5 = torch.bitwise_and(hsl_l_ma, hsl_l <= 0.5)\n hsl_l_l0_5 = torch.bitwise_and(hsl_l_ma, hsl_l > 0.5)\n hsl_s[hsl_l_s0_5] = ((cmax - cmin) / (hsl_l * 2.))[hsl_l_s0_5]\n hsl_s[hsl_l_l0_5] = ((cmax - cmin) / (- hsl_l * 2. + 2.))[hsl_l_l0_5]\n return torch.cat([hsl_h, hsl_s, hsl_l], dim=1).squeeze(0)\n\n @classmethod\n def _hsl_to_rgb(cls, hsl: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSL image tensor to sRGB. \n \n .. note::\n\n returns non-linear sRGB w/ gamma curve as output\n\n :param hsl: Input HSL image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsl = hsl.unsqueeze(0)\n hsl_h, hsl_s, hsl_l = hsl[:, 0:1], hsl[:, 1:2], hsl[:, 2:3]\n _c = (-torch.abs(hsl_l * 2. - 1.) + 1) * hsl_s\n _x = _c * (-torch.abs(hsl_h * 6. % 2. 
- 1) + 1.)\n _m = hsl_l - _c / 2.\n idx = (hsl_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsl).to(hsl.device)\n _o = torch.zeros_like(_c).to(hsl.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _rgb_to_hsv(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSV. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n .. warning::\n\n input tensor will be clamped to [0, 1] range\n\n :param rgb: Input sRGB image tensor\n :return: HSV image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.clamp(0.,1.).unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsv_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsv_h[cmax_idx == 3] = 0.\n hsv_h /= 6.\n hsv_s = torch.where(cmax == 0, torch.tensor(0.).type_as(rgb), delta / cmax)\n hsv_v = cmax\n return torch.cat([hsv_h, hsv_s, hsv_v], dim=1).squeeze(0)\n\n @classmethod\n def _hsv_to_rgb(cls, hsv: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSV image tensor to sRGB. \n \n .. 
note::\n \n returns non-linear sRGB w/ gamma curve as output\n\n :param hsv: Input HSV image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsv = hsv.unsqueeze(0)\n hsv_h, hsv_s, hsv_l = hsv[:, 0:1], hsv[:, 1:2], hsv[:, 2:3]\n _c = hsv_l * hsv_s\n _x = _c * (- torch.abs(hsv_h * 6. % 2. - 1) + 1.)\n _m = hsv_l - _c\n _o = torch.zeros_like(_c).to(hsv.device)\n idx = (hsv_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsv).to(hsv.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _d60_to_d65(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from \"D60\" to D65 white point.\n\n :param im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n # There is not really a CIE D60 white point, but that's what everyone calls what ACES uses.\n return mm(im, cls.mat_d60_to_d65)\n\n @classmethod\n def _d65_to_d60(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from D65 to \"D60\" white point.\n\n :param torch.Tensor im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n return mm(im, cls.mat_d65_to_d60)" }, { "identifier": "Float2", "path": "src/tinycio/numerics/vector.py", "snippet": "class Float2(np.ndarray):\n \"\"\"\n Float2 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 2, \"list/tuple must have 2 components\"\n arr = np.asarray([args[0][0], args[0][1]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 2, \\\n \"numpy array must be sized [C=2] or [C=2, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 2, \\\n \"torch tensor must be sized [C=2] or [C=2, H=1, W=1]\"\n value = 
args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value], dtype=np.float32).view(cls)\n elif len(args) == 2:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float2 only accepts 1 or 2 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float2(0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float2(1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float2(1., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float2(0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return self\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return self\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def yxx(self): return Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n @property\n def grr(self): return Float3(self.g, self.r, self.r)\n @property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def 
xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)" }, { "identifier": "matmul_tl", "path": "src/tinycio/numerics/linalg.py", "snippet": "def matmul_tl(im:torch.Tensor, mat:list):\n \"\"\"\n Multiply image tensor by a 3x3 matrix as Python list.\n\n :param im: Input image tensor of shape (C, H, W).\n :type im: torch.Tensor\n :param mat: 3x3 matrix for multiplication.\n :type mat: List[List[float]]\n :return: Result of the matrix multiplication, with the same shape as the input image.\n :rtype: torch.Tensor\n \"\"\"\n # NOTE: Internal - leaving this clutter undocumented intentionally\n C, H, W = im.size()\n out = im.clone().permute(1,2,0).reshape(-1, 1, C)\n mat = torch.tensor(mat).unsqueeze(0).repeat(out.size(0), 1, 1).to(im.device)\n out = torch.bmm(out, mat.transpose(1, 2)).permute(2,0,1).view(C, H, W)\n return out" } ]
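The matmul_tl helper quoted at the end of this context list multiplies every pixel of a (C, H, W) image tensor by a 3x3 matrix via a batched matmul. The sketch below expresses the same per-pixel product with a plain einsum, using the sRGB-to-XYZ matrix listed in the ColorSpace snippet; it is a sanity-check equivalent, not the library's implementation.

import torch

mat_srgb_to_xyz = [
    [0.412390799265959481, 0.357584339383877964, 0.180480788401834288],
    [0.212639005871510358, 0.715168678767755927, 0.072192315360733715],
    [0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]

def matmul_tl_einsum(im: torch.Tensor, mat: list) -> torch.Tensor:
    """Per-pixel 3x3 matrix multiply: out[c,h,w] = sum_k mat[c][k] * im[k,h,w]."""
    m = torch.tensor(mat, dtype=im.dtype, device=im.device)
    return torch.einsum('ck,khw->chw', m, im)

im = torch.rand(3, 4, 4)                      # toy linear-sRGB image
xyz = matmul_tl_einsum(im, mat_srgb_to_xyz)
print(xyz.shape)                              # torch.Size([3, 4, 4])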
import typing
import torch
import numpy as np
from typing import Union
from enum import IntEnum
from .colorspace import ColorSpace
from .numerics import Float2, matmul_tl as mm
20,571
elif ilm == cls.Illuminant.F10: return Float2(0.34609, 0.35986) elif ilm == cls.Illuminant.F11: return Float2(0.38052, 0.37713) elif ilm == cls.Illuminant.F12: return Float2(0.43695, 0.40441) elif ilm == cls.Illuminant.LED_B1: return Float2(0.4560, 0.4078) elif ilm == cls.Illuminant.LED_B2: return Float2(0.4357, 0.4012) elif ilm == cls.Illuminant.LED_B3: return Float2(0.3756, 0.3723) elif ilm == cls.Illuminant.LED_B4: return Float2(0.3422, 0.3502) elif ilm == cls.Illuminant.LED_B5: return Float2(0.3118, 0.3236) elif ilm == cls.Illuminant.LED_BH1: return Float2(0.4474, 0.4066) elif ilm == cls.Illuminant.LED_RGB1: return Float2(0.4557, 0.4211) elif ilm == cls.Illuminant.LED_V1: return Float2(0.4560, 0.4548) elif ilm == cls.Illuminant.LED_V2: return Float2(0.3781, 0.3775) else: raise Exception(f"Invalid illuminant: {illuminant.name}") @staticmethod def wp_from_cct(cct:int) -> Float2: """ Compute CIE xy chromaticity coordinates (white point) from correlated colour temperature. :param cct: Correlated colour temperature in range [4000, 25000] :return: White point coordinates (CIE xy) """ # https://github.com/colour-science/colour/blob/develop/colour/temperature/cie_d.py # BSD-3-Clause license: # Copyright 2013 Colour Developers # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE assert 4000 <= cct <= 25000, "Correlated color temperature must be in range [4000, 25000]" cct = float(cct) cct_3 = cct**3 cct_2 = cct**2 x = 0. if cct <= 7000.: x = -4.607 * 10**9 / cct_3 \ + 2.9678 * 10**6 / cct_2 \ + 0.09911 * 10**3 / cct \ + 0.244063 else: x = -2.0064 * 10**9 / cct_3 \ + 1.9018 * 10**6 / cct_2 \ + 0.24748 * 10**3 / cct \ + 0.23704 y = -3.000 * x**2 + 2.870 * x - 0.275 return Float2(x, y) @staticmethod def wp_from_image(im_xyz:Union[torch.Tensor, ColorImage]) -> Float2: """ Estimate the dominant illuminant of an environment map or a target image directly and return its approximate CIE xy chromaticity coordinates (white point). .. warning:: This is a lazy method that just averages the pixels in the image tensor. There is no spherical mapping, nor PCA, nor any serious attempt to analyze the image. 
:param im_xyz: Image tensor in CIE XYZ color space :type im_xyz: torch.Tensor | ColorImage :return: Estimated white point coordinates (CIE xy) """ mean_color = torch.tensor([[[im_xyz[0:1].mean(), im_xyz[1:2].mean(), im_xyz[2:3].mean()]]]).permute(2, 0, 1) csum = mean_color[0] + mean_color[1] + mean_color[2] mean_color[0] /= csum mean_color[1] /= csum return Float2(mean_color[0].item(), mean_color[1].item()) @staticmethod def apply( im_lms:Union[torch.Tensor, ColorImage], source_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray], target_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray]) -> torch.Tensor: """ Apply white balance. :param im_lms: Image tensor in LMS color space :type im_lms: torch.Tensor | ColorImage :param source_white: Source white point coordinates (CIE xy) :type source_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray :param target_white: Target white point coordinates (CIE xy) :type target_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray :return: White balanced image tensor """ source = torch.tensor([[[source_white[0], source_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1) target = torch.tensor([[[target_white[0], target_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1) src_lms = ColorSpace.convert(source, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS) dst_lms = ColorSpace.convert(target, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS) mat = [ [dst_lms[0].item()/src_lms[0].item(), 0., 0.], [0., dst_lms[1].item()/src_lms[1].item(), 0.], [0., 0., dst_lms[2].item()/src_lms[2].item()]]
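wp_from_cct in the cropped code above evaluates a cubic fit in 1/CCT for x and a quadratic in x for y. Below is a standalone check of that formula with the same coefficients; the 6500 K result landing near D65 is a useful sanity check.

def cct_to_xy(cct: float) -> tuple:
    """CIE daylight-locus approximation, valid for 4000 K <= cct <= 25000 K."""
    assert 4000 <= cct <= 25000
    c3, c2 = cct ** 3, cct ** 2
    if cct <= 7000:
        x = -4.607e9 / c3 + 2.9678e6 / c2 + 0.09911e3 / cct + 0.244063
    else:
        x = -2.0064e9 / c3 + 1.9018e6 / c2 + 0.24748e3 / cct + 0.23704
    y = -3.000 * x ** 2 + 2.870 * x - 0.275
    return x, y

print(cct_to_xy(6500))   # ~(0.3128, 0.3292), close to the D65 white point (0.31271, 0.32902)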
from __future__ import annotations class WhiteBalance: """ Adjust white balance of an image. Example: .. highlight:: python .. code-block:: python source_white = WhiteBalance.wp_from_image(input_image) target_white = WhiteBalance.wp_from_illuminant(WhiteBalance.Illuminant.NORTH_SKY) white_balanced_image = WhiteBalance.apply(input_image, source_white, target_white) """ class Illuminant(IntEnum): """ CIE 1931 2° standard illuminant. Available options are: .. highlight:: text .. code-block:: text - NONE - A (INCANDESCENT, TUNGSTEN) - D50 (HORIZON) - D55 (MIDMORNING) - D65 (DAYLIGHT_NOON) - D75 (NORTH SKY) - D93 (BLUE_PHOSPHOR) - E (EQUAL_ENERGY) - F1 (FLUORESCENT_DAYLIGHT1) - F2 (FLUORESCENT_COOL_WHITE) - F3 (FLUORESCENT_WHITE) - F4 (FLUORESCENT_WARM_WHITE) - F5 (FLUORESCENT_DAYLIGHT2) - F6 (FLUORESCENT_LIGHT_WHITE) - F7 (D65_SIMULATOR, DAYLIGHT_SIMULATOR) - F8 (D50_SIMULATOR, SYLVANIA_F40) - F9 (FLOURESCENT_COOL_WHITE_DELUXE) - F10 (PHILIPS_TL85, ULTRALUME_50) - F11 (PHILIPS_TL84, ULTRALUME_40) - F12 (PHILIPS_TL83, ULTRALUME_30) - LED_B1 (PHOSPHOR_CONVERTED_BLUE1) - LED_B2 (PHOSPHOR_CONVERTED_BLUE2) - LED_B3 (PHOSPHOR_CONVERTED_BLUE3) - LED_B4 (PHOSPHOR_CONVERTED_BLUE4) - LED_B5 (PHOSPHOR_CONVERTED_BLUE5) - LED_BH1 - LED_RGB1 - LED_V1 (LED_VIOLET1) - LED_V2 (LED_VIOLET2) """ NONE = 0 A = 1 D50 = 2 D55 = 3 D65 = 4 D75 = 5 D93 = 6 E = 7 F1 = 8 F2 = 9 F3 = 10 F4 = 11 F5 = 12 F6 = 13 F7 = 14 F8 = 15 F9 = 16 F10 = 17 F11 = 18 F12 = 19 LED_B1 = 20 LED_B2 = 21 LED_B3 = 22 LED_B4 = 23 LED_B5 = 24 LED_BH1 = 25 LED_RGB1 = 26 LED_V1 = 27 LED_V2 = 28 INCANDESCENT = A TUNGSTEN = A HORIZON = D50 MIDMORNING = D55 DAYLIGHT_NOON = D65 NORTH_SKY = D75 BLUE_PHOSPHOR = D93 EQUAL_ENERGY = E FLUORESCENT_DAYLIGHT1 = F1 FLUORESCENT_COOL_WHITE = F2 FLUORESCENT_WHITE = F3 FLUORESCENT_WARM_WHITE = F4 FLUORESCENT_DAYLIGHT2 = F5 FLUORESCENT_LIGHT_WHITE = F6 D65_SIMULATOR = F7 DAYLIGHT_SIMULATOR = F7 D50_SIMULATOR = F8 SYLVANIA_F40 = F8 FLOURESCENT_COOL_WHITE_DELUXE = F9 PHILIPS_TL85 = F10 ULTRALUME_50 = F10 PHILIPS_TL84 = F11 ULTRALUME_40 = F11 PHILIPS_TL83 = F12 ULTRALUME_30 = F12 PHOSPHOR_CONVERTED_BLUE1 = LED_B1 PHOSPHOR_CONVERTED_BLUE2 = LED_B2 PHOSPHOR_CONVERTED_BLUE3 = LED_B3 PHOSPHOR_CONVERTED_BLUE4 = LED_B4 PHOSPHOR_CONVERTED_BLUE5 = LED_B5 LED_VIOLET1 = LED_V1 LED_VIOLET2 = LED_V2 @classmethod def wp_from_illuminant(cls, illuminant:Illuminant) -> Float2: """ Look up chromaticity coordinates (white point) of a CIE 1931 2° standard illuminant. 
:param illuminant: Standard illuminant :return: White point coordinates (CIE xy) """ # https://en.wikipedia.org/wiki/Standard_illuminant ilm = illuminant if ilm == cls.Illuminant.A: return Float2(0.44757, 0.40745) elif ilm == cls.Illuminant.D50: return Float2(0.34567, 0.35850) elif ilm == cls.Illuminant.D55: return Float2(0.33242, 0.34743) elif ilm == cls.Illuminant.D65: return Float2(0.31271, 0.32902) elif ilm == cls.Illuminant.D75: return Float2(0.29902, 0.31485) elif ilm == cls.Illuminant.D93: return Float2(0.28315, 0.29711) elif ilm == cls.Illuminant.E: return Float2(0.33333, 0.33333) elif ilm == cls.Illuminant.F1: return Float2(0.31310, 0.33727) elif ilm == cls.Illuminant.F2: return Float2(0.37208, 0.37529) elif ilm == cls.Illuminant.F3: return Float2(0.40910, 0.39430) elif ilm == cls.Illuminant.F4: return Float2(0.44018, 0.40329) elif ilm == cls.Illuminant.F5: return Float2(0.31379, 0.34531) elif ilm == cls.Illuminant.F6: return Float2(0.37790, 0.38835) elif ilm == cls.Illuminant.F7: return Float2(0.31292, 0.32933) elif ilm == cls.Illuminant.F8: return Float2(0.34588, 0.35875) elif ilm == cls.Illuminant.F9: return Float2(0.37417, 0.37281) elif ilm == cls.Illuminant.F10: return Float2(0.34609, 0.35986) elif ilm == cls.Illuminant.F11: return Float2(0.38052, 0.37713) elif ilm == cls.Illuminant.F12: return Float2(0.43695, 0.40441) elif ilm == cls.Illuminant.LED_B1: return Float2(0.4560, 0.4078) elif ilm == cls.Illuminant.LED_B2: return Float2(0.4357, 0.4012) elif ilm == cls.Illuminant.LED_B3: return Float2(0.3756, 0.3723) elif ilm == cls.Illuminant.LED_B4: return Float2(0.3422, 0.3502) elif ilm == cls.Illuminant.LED_B5: return Float2(0.3118, 0.3236) elif ilm == cls.Illuminant.LED_BH1: return Float2(0.4474, 0.4066) elif ilm == cls.Illuminant.LED_RGB1: return Float2(0.4557, 0.4211) elif ilm == cls.Illuminant.LED_V1: return Float2(0.4560, 0.4548) elif ilm == cls.Illuminant.LED_V2: return Float2(0.3781, 0.3775) else: raise Exception(f"Invalid illuminant: {illuminant.name}") @staticmethod def wp_from_cct(cct:int) -> Float2: """ Compute CIE xy chromaticity coordinates (white point) from correlated colour temperature. :param cct: Correlated colour temperature in range [4000, 25000] :return: White point coordinates (CIE xy) """ # https://github.com/colour-science/colour/blob/develop/colour/temperature/cie_d.py # BSD-3-Clause license: # Copyright 2013 Colour Developers # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE assert 4000 <= cct <= 25000, "Correlated color temperature must be in range [4000, 25000]" cct = float(cct) cct_3 = cct**3 cct_2 = cct**2 x = 0. if cct <= 7000.: x = -4.607 * 10**9 / cct_3 \ + 2.9678 * 10**6 / cct_2 \ + 0.09911 * 10**3 / cct \ + 0.244063 else: x = -2.0064 * 10**9 / cct_3 \ + 1.9018 * 10**6 / cct_2 \ + 0.24748 * 10**3 / cct \ + 0.23704 y = -3.000 * x**2 + 2.870 * x - 0.275 return Float2(x, y) @staticmethod def wp_from_image(im_xyz:Union[torch.Tensor, ColorImage]) -> Float2: """ Estimate the dominant illuminant of an environment map or a target image directly and return its approximate CIE xy chromaticity coordinates (white point). .. warning:: This is a lazy method that just averages the pixels in the image tensor. There is no spherical mapping, nor PCA, nor any serious attempt to analyze the image. :param im_xyz: Image tensor in CIE XYZ color space :type im_xyz: torch.Tensor | ColorImage :return: Estimated white point coordinates (CIE xy) """ mean_color = torch.tensor([[[im_xyz[0:1].mean(), im_xyz[1:2].mean(), im_xyz[2:3].mean()]]]).permute(2, 0, 1) csum = mean_color[0] + mean_color[1] + mean_color[2] mean_color[0] /= csum mean_color[1] /= csum return Float2(mean_color[0].item(), mean_color[1].item()) @staticmethod def apply( im_lms:Union[torch.Tensor, ColorImage], source_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray], target_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray]) -> torch.Tensor: """ Apply white balance. :param im_lms: Image tensor in LMS color space :type im_lms: torch.Tensor | ColorImage :param source_white: Source white point coordinates (CIE xy) :type source_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray :param target_white: Target white point coordinates (CIE xy) :type target_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray :return: White balanced image tensor """ source = torch.tensor([[[source_white[0], source_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1) target = torch.tensor([[[target_white[0], target_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1) src_lms = ColorSpace.convert(source, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS) dst_lms = ColorSpace.convert(target, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS) mat = [ [dst_lms[0].item()/src_lms[0].item(), 0., 0.], [0., dst_lms[1].item()/src_lms[1].item(), 0.], [0., 0., dst_lms[2].item()/src_lms[2].item()]]
corrected = mm(im_lms, mat)
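The next_line above (`corrected = mm(im_lms, mat)`) completes `WhiteBalance.apply()`: both white points are converted from CIE xyY to LMS and the image is corrected with a diagonal per-channel gain, i.e. a von Kries-style adaptation (note that, as quoted, both white-point tensors repeat component [0]; index [1], the y coordinate, was presumably intended for the second slot). To make the `wp_from_cct()` polynomial easier to check, here is a standalone sketch in plain Python, with no dependency on `Float2`, `ColorSpace`, or `mm` from that codebase, restating the two-branch CIE D-series fit and verifying that 6500 K lands next to the tabulated D65 white point (0.31271, 0.32902).

# Standalone restatement of the CCT -> CIE xy polynomial quoted in wp_from_cct() above.
def cct_to_xy(cct: float) -> tuple:
    """CIE D-series daylight approximation, valid for 4000 K <= cct <= 25000 K."""
    assert 4000 <= cct <= 25000, "correlated color temperature out of range"
    if cct <= 7000.0:
        x = -4.607e9 / cct**3 + 2.9678e6 / cct**2 + 0.09911e3 / cct + 0.244063
    else:
        x = -2.0064e9 / cct**3 + 1.9018e6 / cct**2 + 0.24748e3 / cct + 0.23704
    y = -3.000 * x**2 + 2.870 * x - 0.275
    return x, y

x, y = cct_to_xy(6500)
# 6500 K comes out at roughly (0.3128, 0.3292), essentially the D65 white point.
assert abs(x - 0.31271) < 1e-3 and abs(y - 0.32902) < 1e-3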
0
2023-12-15 15:39:08+00:00
24k
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetProcessor", "path": "magicanimate/models/multicontrolnet.py", "snippet": "class ControlNetProcessor(object):\n def __init__(\n self,\n controlnet: ControlNetModel,\n # image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],\n # controlnet_cond = torch.FloatTensor, #fix\n # conditioning_scale: float = 1.0,\n ):\n self.controlnet = controlnet\n # self.image = image\n # self.controlnet_cond = controlnet_cond #fix\n # self.conditioning_scale = conditioning_scale\n\n # def _default_height_width(self, height, width, image):\n # if isinstance(image, list):\n # image = image[0]\n\n # if height is None:\n # if isinstance(image, PIL.Image.Image):\n # height = image.height\n # elif isinstance(image, torch.Tensor):\n # height = image.shape[3]\n\n # height = (height // 8) * 8 # round down to nearest multiple of 8\n\n # if width is None:\n # if isinstance(image, PIL.Image.Image):\n # width = image.width\n # elif isinstance(image, torch.Tensor):\n # width = image.shape[2]\n\n # width = (width // 8) * 8 # round down to nearest multiple of 8\n\n # return height, width\n\n # def default_height_width(self, height, width):\n # return self._default_height_width(height, width, self.image)\n\n # def _prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype):\n # if not isinstance(image, torch.Tensor):\n # if isinstance(image, PIL.Image.Image):\n # image = [image]\n\n # if isinstance(image[0], PIL.Image.Image):\n # image = [\n # np.array(i.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image\n # ]\n # image = np.concatenate(image, axis=0)\n # image = np.array(image).astype(np.float32) / 255.0\n # image = image.transpose(0, 3, 1, 2)\n # image = torch.from_numpy(image)\n # elif isinstance(image[0], torch.Tensor):\n # image = torch.cat(image, dim=0)\n\n # image_batch_size = image.shape[0]\n\n # if image_batch_size == 1:\n # repeat_by = batch_size\n # else:\n # # image batch size is the same as prompt batch size\n # repeat_by = num_images_per_prompt\n\n # image = image.repeat_interleave(repeat_by, dim=0)\n\n # image = image.to(device=device, dtype=dtype)\n\n # return image\n\n # def _check_inputs(self, image, prompt, prompt_embeds):\n # image_is_pil = isinstance(image, PIL.Image.Image)\n # image_is_tensor = isinstance(image, torch.Tensor)\n # image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n # image_is_tensor_list = 
isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n # if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n # raise TypeError(\n # \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n # )\n\n # if image_is_pil:\n # image_batch_size = 1\n # elif image_is_tensor:\n # image_batch_size = image.shape[0]\n # elif image_is_pil_list:\n # image_batch_size = len(image)\n # elif image_is_tensor_list:\n # image_batch_size = len(image)\n\n # if prompt is not None and isinstance(prompt, str):\n # prompt_batch_size = 1\n # elif prompt is not None and isinstance(prompt, list):\n # prompt_batch_size = len(prompt)\n # elif prompt_embeds is not None:\n # prompt_batch_size = prompt_embeds.shape[0]\n\n # if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n # raise ValueError(\n # f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n # )\n\n # def check_inputs(self, prompt, prompt_embeds):\n # self._check_inputs(self.image, prompt, prompt_embeds)\n\n # def prepare_image(self, width, height, batch_size, num_images_per_prompt, device, do_classifier_free_guidance):\n # self.image = self._prepare_image(\n # self.image, width, height, batch_size, num_images_per_prompt, device, self.controlnet.dtype\n # )\n # if do_classifier_free_guidance:\n # self.image = torch.cat([self.image] * 2)\n\n def __call__(\n self,\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond, #fix\n conditioning_scale,\n return_dict,\n ) -> Tuple:\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond,\n conditioning_scale, \n return_dict=False,\n )\n down_block_res_samples = [\n down_block_res_sample * conditioning_scale for down_block_res_sample in down_block_res_samples\n ]\n mid_block_res_sample *= conditioning_scale\n return (down_block_res_samples, mid_block_res_sample)" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.multicontrolnet import ControlNetProcessor #fix from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
15,706
output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition1=controlnet_condition1, condition2=controlnet_condition2, device=device, dtype=torch.float16, #fix cung num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
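In the cropped_code above, `do_classifier_free_guidance = guidance_scale > 1.0` and the Imagen reference describe standard classifier-free guidance: unconditional and text embeddings are batched together so a single UNet pass yields both noise predictions, which are then blended with weight `guidance_scale`. The blending step itself is not shown in this excerpt; the sketch below is only the standard form of that combination under that assumption, not a verbatim piece of this file.

import torch

def cfg_combine(noise_uncond: torch.Tensor, noise_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # guidance_scale = 1.0 returns the conditional prediction unchanged,
    # matching the "corresponds to doing no classifier free guidance" comment above
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

# hypothetical latent-video shape: (batch, channels, frames, height/8, width/8)
eps_uncond, eps_text = torch.randn(1, 4, 16, 8, 8), torch.randn(1, 4, 16, 8, 8)
eps = cfg_combine(eps_uncond, eps_text, guidance_scale=7.5)
assert eps.shape == eps_uncond.shape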
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
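        # Deterministic DDIM inversion update (the usual DDIM step run in reverse):
        #   pred_x0 = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t)
        #   x_next  = sqrt(alpha_bar_next) * pred_x0 + sqrt(1 - alpha_bar_next) * eps_theta
        # so a real latent is mapped step by step toward Gaussian noise.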
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = 
get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition1=controlnet_condition1, condition2=controlnet_condition2, device=device, dtype=torch.float16, #fix cung num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
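The `guidance_scale` comment above describes classifier-free guidance: the UNet is run on a batch that stacks the unconditional and text-conditioned inputs (in that order, matching `_encode_prompt`), and the two noise predictions are blended before the scheduler step. A minimal sketch of that blending, with placeholder tensor names rather than identifiers from this file:

    import torch

    def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
        # First half of the batch is the unconditional prediction, second half
        # the text-conditioned one, mirroring the [uncond, text] embedding order.
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        # guidance_scale == 1.0 returns the text-conditioned prediction unchanged
        # (no extra guidance); larger values push further along the text direction.
        return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)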
context_scheduler = get_context_scheduler(context_schedule)
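`get_context_scheduler(context_schedule)` returns the frame-window sampler used to denoise long videos in overlapping chunks of `context_frames` frames. A minimal sketch of a uniform scheduler with overlap, as an illustration of the idea rather than the repository's implementation:

    from typing import Iterator, List

    def uniform_contexts(video_length: int, context_frames: int = 16,
                         context_overlap: int = 4) -> Iterator[List[int]]:
        # Slide a fixed-size window over the frame indices; consecutive windows
        # share `context_overlap` frames so chunk boundaries stay consistent.
        step = max(context_frames - context_overlap, 1)
        for start in range(0, max(video_length - context_overlap, 1), step):
            yield [(start + i) % video_length for i in range(context_frames)]

Each yielded index list is denoised separately, and predictions for frames that appear in more than one window are typically averaged back into the full-length latent.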
3
2023-12-15 01:22:37+00:00
24k
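The `interpolate_latents` helper in the record above inserts `interpolation_factor - 1` intermediate frames between every pair of latent frames via `get_tensor_interpolation_method()`. A plain linear blend is the simplest such method; a minimal sketch under that assumption (the function name is illustrative, not taken from the file):

    import torch

    def lerp_latents(v0: torch.Tensor, v1: torch.Tensor, f: float) -> torch.Tensor:
        # Linear blend between two latent frames; f in (0, 1) places the new
        # frame between v0 (f -> 0) and v1 (f -> 1).
        return (1.0 - f) * v0 + f * v1

Spherical interpolation (slerp) is sometimes preferred for smoother motion, but linear blending keeps the example minimal.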
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/contrib/socks.py
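The context snippets below document urllib3's `HTTPConnection`, `HTTPSConnection`, and connection-pool classes, which `contrib/socks.py` builds on. For orientation, a minimal usage sketch assuming urllib3 v2 is installed; most applications go through `PoolManager` rather than these classes directly:

    import urllib3

    # PoolManager keeps one HTTP(S)ConnectionPool per host and reuses sockets
    # across requests to the same host.
    http = urllib3.PoolManager(retries=urllib3.util.Retry(total=3, backoff_factor=0.5))
    resp = http.request(
        "GET",
        "https://example.com/",
        timeout=urllib3.util.Timeout(connect=2.0, read=5.0),
    )
    print(resp.status, len(resp.data))

The module this record points at, `urllib3/contrib/socks.py`, provides `SOCKSProxyManager`, a `PoolManager` variant that routes connections through a SOCKS proxy (it requires the PySocks dependency).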
[ { "identifier": "HTTPConnection", "path": ".venv/Lib/site-packages/urllib3/connection.py", "snippet": "class HTTPConnection(_HTTPConnection):\n \"\"\"\n Based on :class:`http.client.HTTPConnection` but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``source_address``: Set the source address for the current connection.\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass:\n\n .. code-block:: python\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port: typing.ClassVar[int] = port_by_scheme[\"http\"] # type: ignore[misc]\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [\n (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n ]\n\n #: Whether this connection verifies the host's certificate.\n is_verified: bool = False\n\n #: Whether this proxy connection verified the proxy host's certificate.\n # If no proxy is currently connected to the value will be ``None``.\n proxy_is_verified: bool | None = None\n\n blocksize: int\n source_address: tuple[str, int] | None\n socket_options: connection._TYPE_SOCKET_OPTIONS | None\n\n _has_connected_to_proxy: bool\n _response_options: _ResponseOptions | None\n _tunnel_host: str | None\n _tunnel_port: int | None\n _tunnel_scheme: str | None\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n super().__init__(\n host=host,\n port=port,\n timeout=Timeout.resolve_default_timeout(timeout),\n source_address=source_address,\n blocksize=blocksize,\n )\n self.socket_options = socket_options\n self.proxy = proxy\n self.proxy_config = proxy_config\n\n self._has_connected_to_proxy = False\n self._response_options = None\n self._tunnel_host: str | None = None\n self._tunnel_port: int | None = None\n self._tunnel_scheme: str | None = None\n\n # https://github.com/python/mypy/issues/4125\n # Mypy treats this as LSP violation, which is considered a bug.\n # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one.\n # However, there is also a `host` setter so LSP is not violated.\n # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed.\n @property\n def host(self) -> str:\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly when\n checked against a domain name that includes the dot. 
In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip(\".\")\n\n @host.setter\n def host(self, value: str) -> None:\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self) -> socket.socket:\n \"\"\"Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n try:\n sock = connection.create_connection(\n (self._dns_host, self.port),\n self.timeout,\n source_address=self.source_address,\n socket_options=self.socket_options,\n )\n except socket.gaierror as e:\n raise NameResolutionError(self.host, self, e) from e\n except SocketTimeout as e:\n raise ConnectTimeoutError(\n self,\n f\"Connection to {self.host} timed out. (connect timeout={self.timeout})\",\n ) from e\n\n except OSError as e:\n raise NewConnectionError(\n self, f\"Failed to establish a new connection: {e}\"\n ) from e\n\n # Audit hooks are only available in Python 3.8+\n if _HAS_SYS_AUDIT:\n sys.audit(\"http.client.connect\", self, self.host, self.port)\n\n return sock\n\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n if scheme not in (\"http\", \"https\"):\n raise ValueError(\n f\"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'\"\n )\n super().set_tunnel(host, port=port, headers=headers)\n self._tunnel_scheme = scheme\n\n def connect(self) -> None:\n self.sock = self._new_conn()\n if self._tunnel_host:\n # If we're tunneling it means we're connected to our proxy.\n self._has_connected_to_proxy = True\n\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel() # type: ignore[attr-defined]\n\n # If there's a proxy to be connected to we are fully connected.\n # This is set twice (once above and here) due to forwarding proxies\n # not using tunnelling.\n self._has_connected_to_proxy = bool(self.proxy)\n\n @property\n def is_closed(self) -> bool:\n return self.sock is None\n\n @property\n def is_connected(self) -> bool:\n if self.sock is None:\n return False\n return not wait_for_read(self.sock, timeout=0.0)\n\n @property\n def has_connected_to_proxy(self) -> bool:\n return self._has_connected_to_proxy\n\n def close(self) -> None:\n try:\n super().close()\n finally:\n # Reset all stateful properties so connection\n # can be re-used without leaking prior configs.\n self.sock = None\n self.is_verified = False\n self.proxy_is_verified = None\n self._has_connected_to_proxy = False\n self._response_options = None\n self._tunnel_host = None\n self._tunnel_port = None\n self._tunnel_scheme = None\n\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n \"\"\"\"\"\"\n # Empty docstring because the indentation of 
CPython's implementation\n # is broken but we don't want this method in our documentation.\n match = _CONTAINS_CONTROL_CHAR_RE.search(method)\n if match:\n raise ValueError(\n f\"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})\"\n )\n\n return super().putrequest(\n method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding\n )\n\n def putheader(self, header: str, *values: str) -> None:\n \"\"\"\"\"\"\n if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):\n super().putheader(header, *values)\n elif to_str(header.lower()) not in SKIPPABLE_HEADERS:\n skippable_headers = \"', '\".join(\n [str.title(header) for header in sorted(SKIPPABLE_HEADERS)]\n )\n raise ValueError(\n f\"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'\"\n )\n\n # `request` method's signature intentionally violates LSP.\n # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental.\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n # Update the inner socket's timeout value to send the request.\n # This only triggers if the connection is re-used.\n if self.sock is not None:\n self.sock.settimeout(self.timeout)\n\n # Store these values to be fed into the HTTPResponse\n # object later. TODO: Remove this in favor of a real\n # HTTP lifecycle mechanism.\n\n # We have to store these before we call .request()\n # because sometimes we can still salvage a response\n # off the wire even if we aren't able to completely\n # send the request body.\n self._response_options = _ResponseOptions(\n request_method=method,\n request_url=url,\n preload_content=preload_content,\n decode_content=decode_content,\n enforce_content_length=enforce_content_length,\n )\n\n if headers is None:\n headers = {}\n header_keys = frozenset(to_str(k.lower()) for k in headers)\n skip_accept_encoding = \"accept-encoding\" in header_keys\n skip_host = \"host\" in header_keys\n self.putrequest(\n method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host\n )\n\n # Transform the body into an iterable of sendall()-able chunks\n # and detect if an explicit Content-Length is doable.\n chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize)\n chunks = chunks_and_cl.chunks\n content_length = chunks_and_cl.content_length\n\n # When chunked is explicit set to 'True' we respect that.\n if chunked:\n if \"transfer-encoding\" not in header_keys:\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n else:\n # Detect whether a framing mechanism is already in use. 
If so\n # we respect that value, otherwise we pick chunked vs content-length\n # depending on the type of 'body'.\n if \"content-length\" in header_keys:\n chunked = False\n elif \"transfer-encoding\" in header_keys:\n chunked = True\n\n # Otherwise we go off the recommendation of 'body_to_chunks()'.\n else:\n chunked = False\n if content_length is None:\n if chunks is not None:\n chunked = True\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n else:\n self.putheader(\"Content-Length\", str(content_length))\n\n # Now that framing headers are out of the way we send all the other headers.\n if \"user-agent\" not in header_keys:\n self.putheader(\"User-Agent\", _get_default_user_agent())\n for header, value in headers.items():\n self.putheader(header, value)\n self.endheaders()\n\n # If we're given a body we start sending that in chunks.\n if chunks is not None:\n for chunk in chunks:\n # Sending empty chunks isn't allowed for TE: chunked\n # as it indicates the end of the body.\n if not chunk:\n continue\n if isinstance(chunk, str):\n chunk = chunk.encode(\"utf-8\")\n if chunked:\n self.send(b\"%x\\r\\n%b\\r\\n\" % (len(chunk), chunk))\n else:\n self.send(chunk)\n\n # Regardless of whether we have a body or not, if we're in\n # chunked mode we want to send an explicit empty chunk.\n if chunked:\n self.send(b\"0\\r\\n\\r\\n\")\n\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n warnings.warn(\n \"HTTPConnection.request_chunked() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n self.request(method, url, body=body, headers=headers, chunked=True)\n\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n \"\"\"\n Get the response from the server.\n\n If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable.\n\n If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. 
When the connection is closed, the underlying socket is closed.\n \"\"\"\n # Raise the same error as http.client.HTTPConnection\n if self._response_options is None:\n raise ResponseNotReady()\n\n # Reset this attribute for being used again.\n resp_options = self._response_options\n self._response_options = None\n\n # Since the connection's timeout value may have been updated\n # we need to set the timeout on the socket.\n self.sock.settimeout(self.timeout)\n\n # This is needed here to avoid circular import errors\n from .response import HTTPResponse\n\n # Get the response from http.client.HTTPConnection\n httplib_response = super().getresponse()\n\n try:\n assert_header_parsing(httplib_response.msg)\n except (HeaderParsingError, TypeError) as hpe:\n log.warning(\n \"Failed to parse headers (url=%s): %s\",\n _url_from_connection(self, resp_options.request_url),\n hpe,\n exc_info=True,\n )\n\n headers = HTTPHeaderDict(httplib_response.msg.items())\n\n response = HTTPResponse(\n body=httplib_response,\n headers=headers,\n status=httplib_response.status,\n version=httplib_response.version,\n reason=httplib_response.reason,\n preload_content=resp_options.preload_content,\n decode_content=resp_options.decode_content,\n original_response=httplib_response,\n enforce_content_length=resp_options.enforce_content_length,\n request_method=resp_options.request_method,\n request_url=resp_options.request_url,\n )\n return response" }, { "identifier": "HTTPSConnection", "path": ".venv/Lib/site-packages/urllib3/connection.py", "snippet": "class HTTPSConnection(HTTPConnection):\n \"\"\"\n Many of the parameters to this constructor are passed to the underlying SSL\n socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.\n \"\"\"\n\n default_port = port_by_scheme[\"https\"] # type: ignore[misc]\n\n cert_reqs: int | str | None = None\n ca_certs: str | None = None\n ca_cert_dir: str | None = None\n ca_cert_data: None | str | bytes = None\n ssl_version: int | str | None = None\n ssl_minimum_version: int | None = None\n ssl_maximum_version: int | None = None\n assert_fingerprint: str | None = None\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n super().__init__(\n host,\n port=port,\n timeout=timeout,\n source_address=source_address,\n blocksize=blocksize,\n socket_options=socket_options,\n proxy=proxy,\n proxy_config=proxy_config,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.key_password = key_password\n self.ssl_context = ssl_context\n self.server_hostname = server_hostname\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ssl_version = ssl_version\n 
self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n # cert_reqs depends on ssl_context so calculate last.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n self.cert_reqs = cert_reqs\n\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n warnings.warn(\n \"HTTPSConnection.set_cert() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead provide the parameters to the \"\n \"HTTPSConnection constructor.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also\n # have an SSLContext object in which case we'll use its verify_mode.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n def connect(self) -> None:\n sock: socket.socket | ssl.SSLSocket\n self.sock = sock = self._new_conn()\n server_hostname: str = self.host\n tls_in_tls = False\n\n # Do we need to establish a tunnel?\n if self._tunnel_host is not None:\n # We're tunneling to an HTTPS origin so need to do TLS-in-TLS.\n if self._tunnel_scheme == \"https\":\n self.sock = sock = self._connect_tls_proxy(self.host, sock)\n tls_in_tls = True\n\n # If we're tunneling it means we're connected to our proxy.\n self._has_connected_to_proxy = True\n\n self._tunnel() # type: ignore[attr-defined]\n # Override the host with the one we're requesting data from.\n server_hostname = self._tunnel_host\n\n if self.server_hostname is not None:\n server_hostname = self.server_hostname\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn(\n (\n f\"System time is way off (before {RECENT_DATE}). 
This will probably \"\n \"lead to SSL verification errors\"\n ),\n SystemTimeWarning,\n )\n\n sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n sock=sock,\n cert_reqs=self.cert_reqs,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n server_hostname=server_hostname,\n ssl_context=self.ssl_context,\n tls_in_tls=tls_in_tls,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n )\n self.sock = sock_and_verified.socket\n self.is_verified = sock_and_verified.is_verified\n\n # If there's a proxy to be connected to we are fully connected.\n # This is set twice (once above and here) due to forwarding proxies\n # not using tunnelling.\n self._has_connected_to_proxy = bool(self.proxy)\n\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\n \"\"\"\n Establish a TLS connection to the proxy using the provided SSL context.\n \"\"\"\n # `_connect_tls_proxy` is called when self._tunnel_host is truthy.\n proxy_config = typing.cast(ProxyConfig, self.proxy_config)\n ssl_context = proxy_config.ssl_context\n sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n sock,\n cert_reqs=self.cert_reqs,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n server_hostname=hostname,\n ssl_context=ssl_context,\n assert_hostname=proxy_config.assert_hostname,\n assert_fingerprint=proxy_config.assert_fingerprint,\n # Features that aren't implemented for proxies yet:\n cert_file=None,\n key_file=None,\n key_password=None,\n tls_in_tls=False,\n )\n self.proxy_is_verified = sock_and_verified.is_verified\n return sock_and_verified.socket # type: ignore[return-value]" }, { "identifier": "HTTPConnectionPool", "path": ".venv/Lib/site-packages/urllib3/connectionpool.py", "snippet": "class HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. \"localhost\"), passed into\n :class:`http.client.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`http.client.HTTPConnection`.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. 
This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.ProxyManager`\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.ProxyManager`\n\n :param \\\\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = \"http\"\n ConnectionCls: (\n type[BaseHTTPConnection] | type[BaseHTTPSConnection]\n ) = HTTPConnection\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n self.proxy_config = _proxy_config\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in range(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault(\"socket_options\", [])\n\n self.conn_kw[\"proxy\"] = self.proxy\n self.conn_kw[\"proxy_config\"] = self.proxy_config\n\n # Do not pass 'self' as callback to 'finalize'.\n # Then the 'finalize' would keep an endless living (leak) to self.\n # By just passing a reference to the pool allows the garbage collector\n # to free self if nobody else has a reference to it.\n pool = self.pool\n\n # Close all the HTTPConnections in the pool before the\n # HTTPConnectionPool object is garbage collected.\n weakref.finalize(self, _close_pool_connections, pool)\n\n def _new_conn(self) -> BaseHTTPConnection:\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTP connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"80\",\n )\n\n conn = self.ConnectionCls(\n host=self.host,\n port=self.port,\n timeout=self.timeout.connect_timeout,\n **self.conn_kw,\n )\n return conn\n\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n \"\"\"\n Get a connection. 
Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n\n if self.pool is None:\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\") from None # Defensive:\n\n except queue.Empty:\n if self.block:\n raise EmptyPoolError(\n self,\n \"Pool is empty and a new connection can't be opened due to blocking mode.\",\n ) from None\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.debug(\"Resetting dropped connection: %s\", self.host)\n conn.close()\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n if self.pool is not None:\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except queue.Full:\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n if self.block:\n # This should never happen if you got the conn from self._get_conn\n raise FullPoolError(\n self,\n \"Pool reached maximum size and no more connections are allowed.\",\n ) from None\n\n log.warning(\n \"Connection pool is full, discarding connection: %s. Connection pool size: %s\",\n self.host,\n self.pool.qsize(),\n )\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Helper that always returns a :class:`urllib3.util.Timeout`\"\"\"\n if timeout is _DEFAULT_TIMEOUT:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n # See the above comment about EAGAIN in Python 3.\n if hasattr(err, \"errno\") and err.errno in _blocking_errnos:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. 
(read timeout={timeout_value})\"\n ) from err\n\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param response_conn:\n Set this to ``None`` if you will handle releasing the connection or\n set the connection to have the response release it.\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param enforce_content_length:\n Enforce content length checking. Body returned by server must match\n value of Content-Length header, if present. 
Otherwise, raise error.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)\n\n try:\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # _validate_conn() starts the connection to an HTTPS proxy\n # so we need to wrap errors with 'ProxyError' here too.\n except (\n OSError,\n NewConnectionError,\n TimeoutError,\n BaseSSLError,\n CertificateError,\n SSLError,\n ) as e:\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n # If the connection didn't successfully connect to it's proxy\n # then there\n if isinstance(\n new_e, (OSError, NewConnectionError, TimeoutError, SSLError)\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n raise new_e\n\n # conn.request() calls http.client.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n try:\n conn.request(\n method,\n url,\n body=body,\n headers=headers,\n chunked=chunked,\n preload_content=preload_content,\n decode_content=decode_content,\n enforce_content_length=enforce_content_length,\n )\n\n # We are swallowing BrokenPipeError (errno.EPIPE) since the server is\n # legitimately able to close the connection after sending a valid response.\n # With this behaviour, the received response is still readable.\n except BrokenPipeError:\n pass\n except OSError as e:\n # MacOS/Linux\n # EPROTOTYPE is needed on macOS\n # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/\n if e.errno != errno.EPROTOTYPE:\n raise\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n if not conn.is_closed:\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. 
(read timeout={read_timeout})\"\n )\n conn.timeout = read_timeout\n\n # Receive the response from the server\n try:\n response = conn.getresponse()\n except (BaseSSLError, OSError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n # Set properties that are used by the pooling layer.\n response.retries = retries\n response._connection = response_conn # type: ignore[attr-defined]\n response._pool = self # type: ignore[attr-defined]\n\n log.debug(\n '%s://%s:%s \"%s %s %s\" %s %s',\n self.scheme,\n self.host,\n self.port,\n method,\n url,\n # HTTP version\n conn._http_vsn_str, # type: ignore[attr-defined]\n response.status,\n response.length_remaining, # type: ignore[attr-defined]\n )\n\n return response\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n if self.pool is None:\n return\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n # Close all the HTTPConnections in the pool.\n _close_pool_connections(old_pool)\n\n def is_same_host(self, url: str) -> bool:\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith(\"/\"):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n if host is not None:\n host = _normalize_host(host, scheme=scheme)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method\n such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. 
Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When ``False``, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param bool preload_content:\n If True, the response's body will be preloaded into memory.\n\n :param bool decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of ``preload_content``\n which defaults to ``True``.\n\n :param bool chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param int body_pos:\n Position to seek to in file-like body in the event of a retry or\n redirect. Typically this won't need to be set because urllib3 will\n auto-populate the value when needed.\n \"\"\"\n parsed_url = parse_url(url)\n destination_scheme = parsed_url.scheme\n\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = preload_content\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n # Ensure that the URL we're connecting to is properly encoded\n if url.startswith(\"/\"):\n url = to_str(_encode_target(url))\n else:\n url = to_str(parsed_url.url)\n\n conn = None\n\n # Track whether `conn` needs to be released before\n # returning/raising/recursing. Update this variable if necessary, and\n # leave `release_conn` constant throughout the function. 
That way, if\n # the function recurses, the original value of `release_conn` will be\n # passed down into the recursive call, and its value will be respected.\n #\n # See issue #651 [1] for details.\n #\n # [1] <https://github.com/urllib3/urllib3/issues/651>\n release_this_conn = release_conn\n\n http_tunnel_required = connection_requires_http_tunnel(\n self.proxy, self.proxy_config, destination_scheme\n )\n\n # Merge the proxy headers. Only done when not using HTTP CONNECT. We\n # have to copy the headers dict so we can safely change it without those\n # changes being reflected in anyone else's copy.\n if not http_tunnel_required:\n headers = headers.copy() # type: ignore[attr-defined]\n headers.update(self.proxy_headers) # type: ignore[union-attr]\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n # Keep track of whether we cleanly exited the except block. This\n # ensures we do proper cleanup in finally.\n clean_exit = False\n\n # Rewind body position, if needed. Record current position\n # for future rewinds in the event of a redirect/retry.\n body_pos = set_file_position(body, body_pos)\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n # Is this a closed/new connection that requires CONNECT tunnelling?\n if self.proxy is not None and http_tunnel_required and conn.is_closed:\n try:\n self._prepare_proxy(conn)\n except (BaseSSLError, OSError, SocketTimeout) as e:\n self._raise_timeout(\n err=e, url=self.proxy.url, timeout_value=conn.timeout\n )\n raise\n\n # If we're going to release the connection in ``finally:``, then\n # the response doesn't need to know about the connection. Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = conn if not release_conn else None\n\n # Make the request on the HTTPConnection object\n response = self._make_request(\n conn,\n method,\n url,\n timeout=timeout_obj,\n body=body,\n headers=headers,\n chunked=chunked,\n retries=retries,\n response_conn=response_conn,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Everything went great!\n clean_exit = True\n\n except EmptyPoolError:\n # Didn't get a connection from the pool, no need to clean up\n clean_exit = True\n release_this_conn = False\n raise\n\n except (\n TimeoutError,\n HTTPException,\n OSError,\n ProtocolError,\n BaseSSLError,\n SSLError,\n CertificateError,\n ProxyError,\n ) as e:\n # Discard the connection for these exceptions. It will be\n # replaced during the next _get_conn() call.\n clean_exit = False\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e,\n (\n OSError,\n NewConnectionError,\n TimeoutError,\n SSLError,\n HTTPException,\n ),\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n elif isinstance(new_e, (OSError, HTTPException)):\n new_e = ProtocolError(\"Connection aborted.\", new_e)\n\n retries = retries.increment(\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\n )\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if not clean_exit:\n # We hit some kind of exception, handled or otherwise. 
We need\n # to throw the connection away unless explicitly told not to.\n # Close the connection, set the variable to None, and make sure\n # we put the None back in the pool to avoid leaking it.\n if conn:\n conn.close()\n conn = None\n release_this_conn = True\n\n if release_this_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\n \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url\n )\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries,\n redirect,\n assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n # Change the method according to RFC 9110, Section 15.4.4.\n method = \"GET\"\n # And lose the body not to transfer anything sensitive.\n body = None\n headers = HTTPHeaderDict(headers)._prepare_for_method_change()\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep_for_retry(response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n return self.urlopen(\n method,\n redirect_location,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(response.headers.get(\"Retry-After\"))\n if retries.is_retry(method, response.status, has_retry_after):\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_status:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep(response)\n log.debug(\"Retry: %s\", url)\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n return response" }, { "identifier": "HTTPSConnectionPool", "path": ".venv/Lib/site-packages/urllib3/connectionpool.py", "snippet": "class HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`\n is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = \"https\"\n ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection\n\n def 
__init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n super().__init__(\n host,\n port,\n timeout,\n maxsize,\n block,\n headers,\n retries,\n _proxy,\n _proxy_headers,\n **conn_kw,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n \"\"\"Establishes a tunnel connection through HTTP CONNECT.\"\"\"\n if self.proxy and self.proxy.scheme == \"https\":\n tunnel_scheme = \"https\"\n else:\n tunnel_scheme = \"http\"\n\n conn.set_tunnel(\n scheme=tunnel_scheme,\n host=self._tunnel_host,\n port=self.port,\n headers=self.proxy_headers,\n )\n conn.connect()\n\n def _new_conn(self) -> BaseHTTPSConnection:\n \"\"\"\n Return a fresh :class:`urllib3.connection.HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTPS connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"443\",\n )\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n raise ImportError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n\n actual_host: str = self.host\n actual_port = self.port\n if self.proxy is not None and self.proxy.host is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n return self.ConnectionCls(\n host=actual_host,\n port=actual_port,\n timeout=self.timeout.connect_timeout,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n **self.conn_kw,\n )\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super()._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if conn.is_closed:\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn(\n (\n f\"Unverified HTTPS request is being made to host '{conn.host}'. \"\n \"Adding certificate verification is strongly advised. 
See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#tls-warnings\"\n ),\n InsecureRequestWarning,\n )" }, { "identifier": "ConnectTimeoutError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"" }, { "identifier": "NewConnectionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "PoolManager", "path": ".venv/Lib/site-packages/urllib3/poolmanager.py", "snippet": "class PoolManager(RequestMethods):\n \"\"\"\n Allows for arbitrary requests while transparently keeping track of\n necessary connection pools for you.\n\n :param num_pools:\n Number of connection pools to cache before discarding the least\n recently used pool.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param \\\\**connection_pool_kw:\n Additional parameters are used to create fresh\n :class:`urllib3.connectionpool.ConnectionPool` instances.\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n http = urllib3.PoolManager(num_pools=2)\n\n resp1 = http.request(\"GET\", \"https://google.com/\")\n resp2 = http.request(\"GET\", \"https://google.com/mail\")\n resp3 = http.request(\"GET\", \"https://yahoo.com/\")\n\n print(len(http.pools))\n # 2\n\n \"\"\"\n\n proxy: Url | None = None\n proxy_config: ProxyConfig | None = None\n\n def __init__(\n self,\n num_pools: int = 10,\n headers: typing.Mapping[str, str] | None = None,\n **connection_pool_kw: typing.Any,\n ) -> None:\n super().__init__(headers)\n self.connection_pool_kw = connection_pool_kw\n\n self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool]\n self.pools = RecentlyUsedContainer(num_pools)\n\n # Locally set the pool classes and keys so other PoolManagers can\n # override them.\n self.pool_classes_by_scheme = pool_classes_by_scheme\n self.key_fn_by_scheme = key_fn_by_scheme.copy()\n\n def __enter__(self: _SelfT) -> _SelfT:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n self.clear()\n # Return False to re-raise any potential exceptions\n return False\n\n def _new_pool(\n self,\n scheme: str,\n host: str,\n port: int,\n request_context: dict[str, typing.Any] | None = None,\n ) -> HTTPConnectionPool:\n \"\"\"\n Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and\n any additional pool keyword arguments.\n\n If ``request_context`` is provided, it is provided as keyword arguments\n to the pool class used. This method is used to actually create the\n connection pools handed out by :meth:`connection_from_url` and\n companion methods. 
It is intended to be overridden for customization.\n \"\"\"\n pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme]\n if request_context is None:\n request_context = self.connection_pool_kw.copy()\n\n # Default blocksize to _DEFAULT_BLOCKSIZE if missing or explicitly\n # set to 'None' in the request_context.\n if request_context.get(\"blocksize\") is None:\n request_context[\"blocksize\"] = _DEFAULT_BLOCKSIZE\n\n # Although the context has everything necessary to create the pool,\n # this function has historically only used the scheme, host, and port\n # in the positional args. When an API change is acceptable these can\n # be removed.\n for key in (\"scheme\", \"host\", \"port\"):\n request_context.pop(key, None)\n\n if scheme == \"http\":\n for kw in SSL_KEYWORDS:\n request_context.pop(kw, None)\n\n return pool_cls(host, port, **request_context)\n\n def clear(self) -> None:\n \"\"\"\n Empty our store of pools and direct them all to close.\n\n This will not affect in-flight connections, but they will not be\n re-used after completion.\n \"\"\"\n self.pools.clear()\n\n def connection_from_host(\n self,\n host: str | None,\n port: int | None = None,\n scheme: str | None = \"http\",\n pool_kwargs: dict[str, typing.Any] | None = None,\n ) -> HTTPConnectionPool:\n \"\"\"\n Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.\n\n If ``port`` isn't given, it will be derived from the ``scheme`` using\n ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is\n provided, it is merged with the instance's ``connection_pool_kw``\n variable and used to create the new connection pool, if one is\n needed.\n \"\"\"\n\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n request_context = self._merge_pool_kwargs(pool_kwargs)\n request_context[\"scheme\"] = scheme or \"http\"\n if not port:\n port = port_by_scheme.get(request_context[\"scheme\"].lower(), 80)\n request_context[\"port\"] = port\n request_context[\"host\"] = host\n\n return self.connection_from_context(request_context)\n\n def connection_from_context(\n self, request_context: dict[str, typing.Any]\n ) -> HTTPConnectionPool:\n \"\"\"\n Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.\n\n ``request_context`` must at least contain the ``scheme`` key and its\n value must be a key in ``key_fn_by_scheme`` instance variable.\n \"\"\"\n if \"strict\" in request_context:\n warnings.warn(\n \"The 'strict' parameter is no longer needed on Python 3+. \"\n \"This will raise an error in urllib3 v2.1.0.\",\n DeprecationWarning,\n )\n request_context.pop(\"strict\")\n\n scheme = request_context[\"scheme\"].lower()\n pool_key_constructor = self.key_fn_by_scheme.get(scheme)\n if not pool_key_constructor:\n raise URLSchemeUnknown(scheme)\n pool_key = pool_key_constructor(request_context)\n\n return self.connection_from_pool_key(pool_key, request_context=request_context)\n\n def connection_from_pool_key(\n self, pool_key: PoolKey, request_context: dict[str, typing.Any]\n ) -> HTTPConnectionPool:\n \"\"\"\n Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.\n\n ``pool_key`` should be a namedtuple that only contains immutable\n objects. 
At a minimum it must have the ``scheme``, ``host``, and\n ``port`` fields.\n \"\"\"\n with self.pools.lock:\n # If the scheme, host, or port doesn't match existing open\n # connections, open a new ConnectionPool.\n pool = self.pools.get(pool_key)\n if pool:\n return pool\n\n # Make a fresh ConnectionPool of the desired type\n scheme = request_context[\"scheme\"]\n host = request_context[\"host\"]\n port = request_context[\"port\"]\n pool = self._new_pool(scheme, host, port, request_context=request_context)\n self.pools[pool_key] = pool\n\n return pool\n\n def connection_from_url(\n self, url: str, pool_kwargs: dict[str, typing.Any] | None = None\n ) -> HTTPConnectionPool:\n \"\"\"\n Similar to :func:`urllib3.connectionpool.connection_from_url`.\n\n If ``pool_kwargs`` is not provided and a new pool needs to be\n constructed, ``self.connection_pool_kw`` is used to initialize\n the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``\n is provided, it is used instead. Note that if a new pool does not\n need to be created for the request, the provided ``pool_kwargs`` are\n not used.\n \"\"\"\n u = parse_url(url)\n return self.connection_from_host(\n u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs\n )\n\n def _merge_pool_kwargs(\n self, override: dict[str, typing.Any] | None\n ) -> dict[str, typing.Any]:\n \"\"\"\n Merge a dictionary of override values for self.connection_pool_kw.\n\n This does not modify self.connection_pool_kw and returns a new dict.\n Any keys in the override dictionary with a value of ``None`` are\n removed from the merged dictionary.\n \"\"\"\n base_pool_kwargs = self.connection_pool_kw.copy()\n if override:\n for key, value in override.items():\n if value is None:\n try:\n del base_pool_kwargs[key]\n except KeyError:\n pass\n else:\n base_pool_kwargs[key] = value\n return base_pool_kwargs\n\n def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool:\n \"\"\"\n Indicates if the proxy requires the complete destination URL in the\n request. Normally this is only needed when not using an HTTP CONNECT\n tunnel.\n \"\"\"\n if self.proxy is None:\n return False\n\n return not connection_requires_http_tunnel(\n self.proxy, self.proxy_config, parsed_url.scheme\n )\n\n def urlopen( # type: ignore[override]\n self, method: str, url: str, redirect: bool = True, **kw: typing.Any\n ) -> BaseHTTPResponse:\n \"\"\"\n Same as :meth:`urllib3.HTTPConnectionPool.urlopen`\n with custom cross-host redirect logic and only sends the request-uri\n portion of the ``url``.\n\n The given ``url`` parameter must be absolute, such that an appropriate\n :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.\n \"\"\"\n u = parse_url(url)\n\n if u.scheme is None:\n warnings.warn(\n \"URLs without a scheme (ie 'https://') are deprecated and will raise an error \"\n \"in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs \"\n \"start with 'https://' or 'http://'. 
Read more in this issue: \"\n \"https://github.com/urllib3/urllib3/issues/2920\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)\n\n kw[\"assert_same_host\"] = False\n kw[\"redirect\"] = False\n\n if \"headers\" not in kw:\n kw[\"headers\"] = self.headers\n\n if self._proxy_requires_url_absolute_form(u):\n response = conn.urlopen(method, url, **kw)\n else:\n response = conn.urlopen(method, u.request_uri, **kw)\n\n redirect_location = redirect and response.get_redirect_location()\n if not redirect_location:\n return response\n\n # Support relative URLs for redirecting.\n redirect_location = urljoin(url, redirect_location)\n\n if response.status == 303:\n # Change the method according to RFC 9110, Section 15.4.4.\n method = \"GET\"\n # And lose the body not to transfer anything sensitive.\n kw[\"body\"] = None\n kw[\"headers\"] = HTTPHeaderDict(kw[\"headers\"])._prepare_for_method_change()\n\n retries = kw.get(\"retries\")\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect)\n\n # Strip headers marked as unsafe to forward to the redirected location.\n # Check remove_headers_on_redirect to avoid a potential network call within\n # conn.is_same_host() which may use socket.gethostbyname() in the future.\n if retries.remove_headers_on_redirect and not conn.is_same_host(\n redirect_location\n ):\n new_headers = kw[\"headers\"].copy()\n for header in kw[\"headers\"]:\n if header.lower() in retries.remove_headers_on_redirect:\n new_headers.pop(header, None)\n kw[\"headers\"] = new_headers\n\n try:\n retries = retries.increment(method, url, response=response, _pool=conn)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n kw[\"retries\"] = retries\n kw[\"redirect\"] = redirect\n\n log.info(\"Redirecting %s -> %s\", url, redirect_location)\n\n response.drain_conn()\n return self.urlopen(method, redirect_location, **kw)" }, { "identifier": "parse_url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" } ]
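The context snippets above document three cooperating urllib3 APIs: parse_url splits a URL into its components, PoolManager keeps one connection pool per host, and HTTPConnectionPool.urlopen accepts retry/redirect/timeout controls backed by Retry. A minimal usage sketch of those documented APIs; the target URL, retry counts, and timeout values are illustrative placeholders, not values taken from this record:

import urllib3
from urllib3.util import parse_url
from urllib3.util.retry import Retry

url = "https://example.com/mail"                 # placeholder endpoint
print(parse_url(url))                            # Url(scheme='https', host='example.com', path='/mail', ...)

http = urllib3.PoolManager(num_pools=2)          # caches at most two per-host connection pools
try:
    resp = http.request(
        "GET",
        url,
        retries=Retry(total=3, redirect=2),      # retry up to 3 times, follow at most 2 redirects
        timeout=urllib3.Timeout(connect=2.0, read=5.0),
    )
    print(resp.status, resp.headers.get("Content-Type"))
except urllib3.exceptions.MaxRetryError as exc:  # raised once retries are exhausted
    print("request failed after retries:", exc.reason)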
import socks # type: ignore[import] import warnings import typing import ssl from ..exceptions import DependencyWarning from socket import timeout as SocketTimeout from ..connection import HTTPConnection, HTTPSConnection from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url from typing import TypedDict
20041
try: except ImportError: warnings.warn( ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), DependencyWarning, ) raise try: except ImportError: ssl = None # type: ignore[assignment] class _TYPE_SOCKS_OPTIONS(TypedDict): socks_version: int proxy_host: str | None proxy_port: str | None username: str | None password: str | None rdns: bool class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__( self, _socks_options: _TYPE_SOCKS_OPTIONS, *args: typing.Any, **kwargs: typing.Any, ) -> None: self._socks_options = _socks_options super().__init__(*args, **kwargs) def _new_conn(self) -> socks.socksocket: """ Establish a new connection via the SOCKS proxy. """ extra_kw: dict[str, typing.Any] = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options["socks_version"], proxy_addr=self._socks_options["proxy_host"], proxy_port=self._socks_options["proxy_port"], proxy_username=self._socks_options["username"], proxy_password=self._socks_options["password"], proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw, ) except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e else: # Adding `from e` messes with coverage somehow, so it's omitted. # See #2386. raise NewConnectionError( self, f"Failed to establish a new connection: {error}" ) else: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e except OSError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection
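cropped_code above starts with two try/except blocks whose bodies look empty because the imports they guard were hoisted into import_statement. A reassembly sketch of how those pieces plausibly fit together in the source module; absolute imports are used so the snippet runs outside the urllib3 package, and the warning text is abbreviated:

import warnings
from urllib3.exceptions import DependencyWarning

try:
    import socks  # type: ignore[import]  # provided by the optional PySocks package
except ImportError:
    warnings.warn(
        "SOCKS support in urllib3 requires the optional PySocks dependency.",
        DependencyWarning,
    )
    raise

try:
    import ssl
except ImportError:
    ssl = None  # type: ignore[assignment]  # TLS over SOCKS is unavailable without ssl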
""" This module contains provisional support for SOCKS proxies from within urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: - SOCKS4A (``proxy_url='socks4a://...``) - SOCKS4 (``proxy_url='socks4://...``) - SOCKS5 with remote DNS (``proxy_url='socks5h://...``) - SOCKS5 with local DNS (``proxy_url='socks5://...``) - Usernames and passwords for the SOCKS proxy .. note:: It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in your ``proxy_url`` to ensure that DNS resolution is done from the remote server instead of client-side when connecting to a domain name. SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5 supports IPv4, IPv6, and domain names. When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url`` will be sent as the ``userid`` section of the SOCKS request: .. code-block:: python proxy_url="socks4a://<userid>@proxy-host" When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion of the ``proxy_url`` will be sent as the username/password to authenticate with the proxy: .. code-block:: python proxy_url="socks5h://<username>:<password>@proxy-host" """ from __future__ import annotations try: except ImportError: warnings.warn( ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), DependencyWarning, ) raise try: except ImportError: ssl = None # type: ignore[assignment] class _TYPE_SOCKS_OPTIONS(TypedDict): socks_version: int proxy_host: str | None proxy_port: str | None username: str | None password: str | None rdns: bool class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__( self, _socks_options: _TYPE_SOCKS_OPTIONS, *args: typing.Any, **kwargs: typing.Any, ) -> None: self._socks_options = _socks_options super().__init__(*args, **kwargs) def _new_conn(self) -> socks.socksocket: """ Establish a new connection via the SOCKS proxy. """ extra_kw: dict[str, typing.Any] = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options["socks_version"], proxy_addr=self._socks_options["proxy_host"], proxy_port=self._socks_options["proxy_port"], proxy_username=self._socks_options["username"], proxy_password=self._socks_options["password"], proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw, ) except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e else: # Adding `from e` messes with coverage somehow, so it's omitted. # See #2386. 
raise NewConnectionError( self, f"Failed to establish a new connection: {error}" ) else: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e except OSError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection
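The module docstring in all_code recommends the socks5h:// scheme so DNS resolution happens on the proxy rather than on the client. A short usage sketch following that recommendation; the proxy address, credentials, and target URL are placeholders:

from urllib3.contrib.socks import SOCKSProxyManager

# socks5h:// selects SOCKS5 with remote DNS; the userinfo part authenticates to the proxy.
proxy = SOCKSProxyManager("socks5h://user:secret@127.0.0.1:1080")
resp = proxy.request("GET", "https://example.com/")
print(resp.status)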
class SOCKSProxyManager(PoolManager):
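next_line opens the SOCKSProxyManager class that this record treats as the completion target. A plausible continuation sketch, not necessarily the library's verbatim implementation, assuming the module context shown in all_code (socks, parse_url, PoolManager, and the two SOCKS pool classes already in scope): it maps each proxy scheme onto a PySocks proxy type plus a remote-DNS flag and threads the resulting _socks_options dict into every pool the manager creates.

class SOCKSProxyManager(PoolManager):
    # Route both URL schemes through the SOCKS-aware pools defined in cropped_code.
    pool_classes_by_scheme = {
        "http": SOCKSHTTPConnectionPool,
        "https": SOCKSHTTPSConnectionPool,
    }

    def __init__(self, proxy_url, username=None, password=None,
                 num_pools=10, headers=None, **connection_pool_kw):
        parsed = parse_url(proxy_url)

        # Allow credentials embedded in the proxy URL ("user:pass@host").
        if username is None and password is None and parsed.auth is not None:
            username, _, password = parsed.auth.partition(":")

        # Map the proxy scheme onto a PySocks proxy type and a remote-DNS flag.
        if parsed.scheme == "socks5":
            socks_version, rdns = socks.PROXY_TYPE_SOCKS5, False
        elif parsed.scheme == "socks5h":
            socks_version, rdns = socks.PROXY_TYPE_SOCKS5, True
        elif parsed.scheme == "socks4":
            socks_version, rdns = socks.PROXY_TYPE_SOCKS4, False
        elif parsed.scheme == "socks4a":
            socks_version, rdns = socks.PROXY_TYPE_SOCKS4, True
        else:
            raise ValueError(f"Unable to determine SOCKS version from {proxy_url}")

        self.proxy_url = proxy_url

        # Every SOCKSConnection created by the pools receives these options.
        connection_pool_kw["_socks_options"] = {
            "socks_version": socks_version,
            "proxy_host": parsed.host,
            "proxy_port": parsed.port,
            "username": username,
            "password": password,
            "rdns": rdns,
        }

        super().__init__(num_pools, headers, **connection_pool_kw)

        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme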
6
2023-12-16 04:12:01+00:00
24k
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
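The FrameTimecode snippet above spells out the rich-comparison rules: an int compares as a frame number, a float as seconds, a string is parsed as a timecode first, and two FrameTimecode objects must share a framerate or a TypeError is raised. A minimal sketch of those rules follows; the constructor arguments are inferred from the base_timecode property in the VideoStream snippet, and the exact interpretation of int/float/str inputs is an assumption to verify against the class itself.

from backend.scenedetect.frame_timecode import FrameTimecode

tc = FrameTimecode(timecode=100, fps=25.0)   # assumed: int timecode = frame number
print(tc == 100)              # int compared against frame_num            -> True
print(tc < 5.0)               # float compared against get_seconds()      -> True (4.0 s)
print(tc <= '00:00:04.000')   # str parsed via _parse_timecode_string first
other = FrameTimecode(timecode=100, fps=30.0)
# tc == other                 # would raise TypeError: framerates differ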
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
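The StatsManager snippet above documents a small key/value API (register_metrics, set_metrics, get_metrics, save_to_csv) that can be exercised on its own. The sketch below uses a made-up metric key, 'content_val', and passes an explicit base_timecode because save_to_csv asserts one is set; both choices are illustrative assumptions.

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.stats_manager import StatsManager

stats = StatsManager(base_timecode=FrameTimecode(timecode=0, fps=25.0))
stats.register_metrics(['content_val'])        # a key may only be registered once
stats.set_metrics(0, {'content_val': 12.5})    # per-frame metric values
stats.set_metrics(1, {'content_val': 13.1})
print(stats.get_metrics(1, ['content_val']))   # -> [13.1]
if stats.is_save_required():                   # True: metrics were added since load
    stats.save_to_csv('stats.csv')             # also accepts an open file handle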
14,729
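The SceneDetector snippet above describes the base-class contract for a dense detector: get_metrics() lists any StatsManager keys, process_frame() returns the cut frames found so far, and post_process() runs after the last frame. The subclass below is a deliberately naive sketch (cut on a large jump in mean pixel intensity) intended only to show the shape of the interface, not a usable detection algorithm.

import numpy
from backend.scenedetect.scene_detector import SceneDetector

class MeanJumpDetector(SceneDetector):
    """Toy detector: report a cut whenever mean intensity jumps by more than a threshold."""

    def __init__(self, threshold: float = 30.0):
        self._threshold = threshold
        self._last_mean = None

    def get_metrics(self):
        return []                               # no StatsManager-backed metrics

    def process_frame(self, frame_num, frame_img):
        cuts = []
        if frame_img is not None:
            mean = float(numpy.mean(frame_img))
            if self._last_mean is not None and abs(mean - self._last_mean) > self._threshold:
                cuts.append(frame_num)          # a new scene starts at this frame
            self._last_mean = mean
        return cuts

    def post_process(self, frame_num):
        return []                               # nothing buffered to flush

An instance would be registered with a SceneManager via add_detector(), exactly as ContentDetector is in the module docstring further below.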
css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. 
Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
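Because the save_images() signature and its return value are documented in full above, a short usage sketch is possible. open_video, SceneManager and ContentDetector are taken from the package-level usage example in the module docstring below; importing them from backend.scenedetect (rather than the upstream scenedetect package) is an assumption about this vendored copy.

from backend.scenedetect import open_video, SceneManager, ContentDetector
from backend.scenedetect.scene_manager import save_images

video = open_video('input.mp4')
scene_manager = SceneManager()
scene_manager.add_detector(ContentDetector())
scene_manager.detect_scenes(video)
scenes = scene_manager.get_scene_list()

# Two JPEG stills per scene, rescaled to 320 px wide, written into ./thumbs.
image_paths = save_images(
    scene_list=scenes,
    video=video,
    num_images=2,
    image_extension='jpg',
    encoder_param=90,
    width=320,
    output_dir='thumbs',
)
print(image_paths.get(1, []))   # image paths for the first scene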
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
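The module shown above also defines two standalone helpers, get_scenes_from_cuts() and write_scene_list(), whose signatures appear in full. The sketch below converts a hand-written cut list into contiguous scenes and writes them to CSV; the frame numbers, frame rate and module import path are illustrative assumptions.

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.scene_manager import get_scenes_from_cuts, write_scene_list

base = FrameTimecode(timecode=0, fps=25.0)
cuts = [base + 250, base + 700]            # cuts detected at frames 250 and 700
scenes = get_scenes_from_cuts(cut_list=cuts, start_pos=base, end_pos=base + 1000)
# -> [(0, 250), (250, 700), (700, 1000)] as FrameTimecode pairs

with open('scenes.csv', 'w') as csv_file:
    write_scene_list(csv_file, scenes)     # include_cut_list=True by default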
imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param
5
2023-10-25 02:50:01+00:00
24k
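The VideoStream snippet earlier in this record documents the read()/seek() contract: seek targets are 0-based, read() returns False once the video ends, and decode=False advances without decoding. The loop below sketches that contract; open_video is assumed to exist in this vendored backend.scenedetect package as it does in upstream PySceneDetect.

from backend.scenedetect import open_video

video = open_video('input.mp4')
video.seek(4)                 # 0-based: the next read() returns the 5th frame
frame = video.read()          # numpy.ndarray, or False once the video ends
print(video.frame_number)     # -> 5 after reading the 5th frame

video.reset()                 # close and re-open, back at the start
n_frames = 0
while video.read(decode=False):   # advance without decoding each frame
    n_frames += 1
print('frames advanced:', n_frames)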
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n FINETUNING_MONGO_HOST: str = os.getenv(\"FINETUNING_MONGO_HOST\", \"mongo\")\n FINETUNING_MONGO_PORT: int = os.getenv(\"FINETUNING_MONGO_PORT\", 27017)\n FINETUNING_MONGO_DB_NAME: str = os.getenv(\n \"FINETUNING_MONGO_DB_NAME\", \"embedding_studio\"\n )\n FINETUNING_MONGO_USERNAME: str = os.getenv(\n \"FINETUNING_MONGO_USERNAME\", \"root\"\n )\n FINETUNING_MONGO_PASSWORD: str = os.getenv(\n \"FINETUNING_MONGO_PASSWORD\", \"mongopassword\"\n )\n FINETUNING_MONGO_URL: str = (\n f\"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@\"\n f\"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}\"\n )\n CLICKSTREAM_MONGO_HOST: str = os.getenv(\"CLICKSTREAM_MONGO_HOST\", \"mongo\")\n CLICKSTREAM_MONGO_PORT: int = os.getenv(\"CLICKSTREAM_MONGO_PORT\", 27017)\n CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_DB_NAME\", \"embedding_studio\"\n )\n CLICKSTREAM_MONGO_USERNAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_USERNAME\", \"root\"\n )\n CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(\n \"CLICKSTREAM_MONGO_PASSWORD\", \"mongopassword\"\n )\n CLICKSTREAM_MONGO_URL: str = (\n f\"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@\"\n f\"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}\"\n )\n REDIS_HOST: str = os.getenv(\"REDIS_HOST\", \"localhost\")\n REDIS_PORT: int = os.getenv(\"REDIS_PORT\", 6379)\n REDIS_PASSWORD: str = os.getenv(\"REDIS_PASSWORD\", \"redispassword\")\n REDIS_URL: str = f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\"\n MINIO_HOST: str = os.getenv(\"MINIO_HOST\", \"localhost\")\n MINIO_PORT: int = os.getenv(\"MINIO_PORT\", 9000)\n MINIO_ROOT_USER: str = os.getenv(\"MINIO_ROOT_USER\", \"root\")\n MINIO_ROOT_PASSWORD: str = os.getenv(\n \"MINIO_ROOT_PASSWORD\", \"miniopassword\"\n )\n MINIO_DEFAULT_BUCKETS: str = os.getenv(\n \"MINIO_DEFAULT_BUCKETS\", \"embeddingstudio\"\n )\n MINIO_ACCESS_KEY: str = os.getenv(\n \"MINIO_ACCESS_KEY\", \"mtGNiEvoTL6C0EXAMPLE\"\n )\n MINIO_SECRET_KEY: str = os.getenv(\n \"MINIO_SECRET_KEY\", \"HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY\"\n )\n MYSQL_HOST: str = os.getenv(\"MYSQL_HOST\", \"localhost\")\n MYSQL_PORT: int = os.getenv(\"MYSQL_PORT\", 3306)\n MYSQL_DATABASE: str = os.getenv(\"MYSQL_DATABASE\", \"mlflow\")\n MYSQL_USER: str = os.getenv(\"MYSQL_USER\", \"mlflow_user\")\n MYSQL_PASSWORD: str = os.getenv(\"MYSQL_PASSWORD\", \"Baxp3O5rUvpIxiD77BfZ\")\n MYSQL_ROOT_PASSWORD: str = os.getenv(\n \"MYSQL_ROOT_PASSWORD\", \"PrK5qmPTDsm2IYKvHVG8\"\n )\n MLFLOW_HOST: str = os.getenv(\"MLFLOW_HOST\", \"localhost\")\n MLFLOW_PORT: int = os.getenv(\"MLFLOW_PORT\", 5001)\n MLFLOW_TRACKING_URI: str = f\"http://{MLFLOW_HOST}:{MLFLOW_PORT}\"\n ES_PLUGINS_PATH: str = os.getenv(\"ES_PLUGINS_PATH\", \"plugins\")\n FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(\n \"FINE_TUNING_WORKER_MAX_RETRIES\", 3\n )\n FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(\n \"FINE_TUNING_WORKER_TIME_LIMIT\", 18000000\n )\n DEFAULT_MAX_ATTEMPTS: int = os.getenv(\"DEFAULT_MAX_ATTEMPTS\", 3)\n DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(\n \"DEFAULT_WAIT_TIME_SECONDS\", 3.0\n )\n S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(\n \"S3_READ_CREDENTIALS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_READ_WAIT_TIME_SECONDS: float = os.getenv(\n 
\"S3_READ_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n S3_DOWNLOAD_DATA_ATTEMPTS: int = os.getenv(\n \"S3_DOWNLOAD_DATA_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_METRIC_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_PARAM_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOAD_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_RUNS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_END_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_END_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS\",\n DEFAULT_WAIT_TIME_SECONDS,\n )\n MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC\", 12 * 60 * 60\n )\n CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC\", 5 * 60\n )" }, { "identifier": "FineTuningMethod", "path": "embedding_studio/core/plugin.py", "snippet": "class 
FineTuningMethod(ABC):\n \"\"\"Base class (plugin) for fine-tuning methods.\n\n All fine-tuning methods must inherit from this class.\n \"\"\"\n\n meta: PluginMeta\n\n @abstractmethod\n def upload_initial_model(self) -> None:\n \"\"\"Upload the initial model to the storage.\n\n Method that should be implemented by subclasses to upload the\n initial model to the storage.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement upload_initial_model\"\n )\n\n @abstractmethod\n def get_fine_tuning_builder(\n self, clickstream: List[SessionWithEvents]\n ) -> FineTuningBuilder:\n \"\"\"Return a FineTuningBuilder instance for the fine-tuning process.\n\n Method that should be implemented by subclasses to provide a\n FineTuningBuilder instance.\n\n :param clickstream: Collection of user feedback, used to enhance\n the model.\n :return: An instance of FineTuningBuilder used for\n launching the fine-tuning process.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement get_fine_tuning_builder\"\n )" }, { "identifier": "AWSS3ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/s3_parser.py", "snippet": "class AWSS3ClickstreamParser(ClickstreamParser):\n def __init__(\n self, query_item_type: type, search_result_type: type, event_type: type\n ):\n super(AWSS3ClickstreamParser, self).__init__(\n query_item_type, search_result_type, S3FileMeta, event_type\n )" }, { "identifier": "DummyEventType", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class DummyEventType(EventType):\n importance: float\n\n @property\n def event_importance(self) -> float:\n return self.importance" }, { "identifier": "SearchResult", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class SearchResult(BaseModel):\n item: ItemMeta\n is_click: bool\n rank: Optional[float] = None\n event_type: Optional[EventType] = None\n timestamp: Optional[int] = None\n\n @validator(\"event_type\", pre=True, always=True)\n def validate_event_type(cls, value, values):\n if value is not None and not isinstance(value, EventType):\n raise ValueError(\"Invalid event_type instance\")\n return value\n\n class Config:\n arbitrary_types_allowed = True\n\n @classmethod\n def from_mongo(\n cls,\n result: SearchResultItem,\n event_ids: Set[str],\n item_type: type,\n event_type: type,\n ) -> \"SearchResult\":\n event_instance = DummyEventType(importance=1)\n\n return cls(\n item=item_type(**result.meta),\n is_click=result.object_id in event_ids,\n event_type=event_instance,\n timestamp=None,\n )\n\n @classmethod\n def from_dict(\n cls, data: dict, item_type: type, event_type: type\n ) -> \"SearchResult\":\n event_data: Optional[Dict] = data.get(\"event_type\")\n event_instance = None\n\n if event_data is not None:\n event_instance = event_type(**event_data)\n\n return cls(\n item=item_type(**data[\"item\"]),\n is_click=data[\"is_click\"],\n rank=data[\"rank\"],\n event_type=event_instance,\n timestamp=int(data.get(\"timestamp\")),\n )" }, { "identifier": "ClickstreamSessionsSplitter", "path": "embedding_studio/embeddings/data/clickstream/splitter.py", "snippet": "class ClickstreamSessionsSplitter:\n def __init__(\n self,\n test_size_ratio: float = 0.2,\n shuffle: bool = True,\n random_state: Optional[int] = None,\n ):\n \"\"\"Generate train / test clickstream sessions split.\n\n :param test_size_ratio: ratio of test split size (default: 0.2)\n :param shuffle: to shuffle or not paired clickstream sessions (default: True)\n :param 
random_state: random state to sklearn splitter (default: None)\n \"\"\"\n if (\n not isinstance(test_size_ratio, float)\n or test_size_ratio <= 0\n or test_size_ratio >= 1.0\n ):\n raise ValueError(\n f\"test_size_ration is a numeric value in range (0.0, 1.0)\"\n )\n\n if test_size_ratio >= 0.5:\n logger.warning(\n \"test_size_ration is larger than 0.5. It's unusual for ML to have test size > train size.\"\n )\n\n self._test_size_ratio = test_size_ratio\n\n if not isinstance(shuffle, bool):\n raise ValueError(\"shuffle should be boolean\")\n self._shuffle = shuffle\n self._random_state = random_state\n\n @property\n def shuffle(self) -> bool:\n return self._shuffle\n\n def split(self, sessions: List[ClickstreamSession]) -> DatasetDict:\n \"\"\"Split clickstream sessions.\n\n :param sessions: sessions to be split\n :return: train / test splits accordingly (PairedClickstreamDataset)\n \"\"\"\n # Get all IDs\n all_result_ids: Set[str] = set()\n for session in sessions:\n all_result_ids.update(session.results)\n\n if len(all_result_ids) == 0:\n raise ValueError(\"Sessions list is empty\")\n\n # Ensure a minimum number of unique result IDs in each set\n min_unique_test_sessions: int = int(\n self._test_size_ratio * len(sessions)\n )\n\n # Split the result IDs into train and test sets\n train_result_ids, test_result_ids = train_test_split(\n list(all_result_ids),\n test_size=self._test_size_ratio,\n random_state=self._random_state,\n )\n test_result_ids: Set[str] = set(test_result_ids)\n\n # Split sessions into train and test based on result IDs\n train_sessions: List[ClickstreamSession] = []\n test_sessions: List[ClickstreamSession] = []\n\n for session in sessions:\n if len(session.results) == 0:\n continue\n\n if (\n len(set(session.results) & test_result_ids)\n / len(session.results)\n <= 0.5\n ):\n # If less than 50% of result IDs intersect with the test set, add to the train set\n train_sessions.append(session)\n else:\n test_sessions.append(session)\n\n if len(test_sessions) < min_unique_test_sessions:\n logger.warning(\n f\"Clickstream sessions intersects highly, so they are not split well\"\n )\n random_train_session_indexess: List[int] = random.choices(\n list(range(len(train_sessions))),\n k=min_unique_test_sessions - len(test_sessions),\n )\n for i in reversed(sorted(random_train_session_indexess)):\n test_sessions.append(train_sessions.pop(i))\n\n if len(test_sessions) + len(train_sessions) < len(sessions):\n missed_sessions_count = len(sessions) - (\n len(test_sessions) + len(train_sessions)\n )\n logger.warning(\n f\"Clickstream sessions weren't split correctly, add {missed_sessions_count} more sessions to the train split.\"\n )\n\n for session in sessions:\n if (\n session not in train_sessions\n and session not in test_sessions\n ):\n train_sessions.append(session)\n\n return DatasetDict(\n {\n \"train\": PairedClickstreamDataset(\n train_sessions, self.shuffle\n ),\n \"test\": PairedClickstreamDataset(test_sessions, self.shuffle),\n }\n )" }, { "identifier": "TextQueryItem", "path": "embedding_studio/embeddings/data/clickstream/text_query_item.py", "snippet": "class TextQueryItem(QueryItem):\n text: str\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "TextQueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/text_query_retriever.py", "snippet": "class TextQueryRetriever(QueryRetriever):\n def __call__(self, query: TextQueryItem) -> str:\n if not hasattr(query, \"text\"):\n raise ValueError(f\"Query object does not have text attribute\")\n 
return query.text" }, { "identifier": "AWSS3DataLoader", "path": "embedding_studio/embeddings/data/loaders/s3/s3_loader.py", "snippet": "class AWSS3DataLoader(DataLoader):\n def __init__(self, retry_config: Optional[RetryConfig] = None, **kwargs):\n \"\"\"Items loader from AWS S3.\n\n :param max_attempts: maximum number of attempts (default: 10)\n :param wait_time_seconds: time to wait between (default: 10)\n :param kwargs: dict data for AWSS3Credentials\n \"\"\"\n super(AWSS3DataLoader, self).__init__(**kwargs)\n self.retry_config = (\n retry_config\n if retry_config\n else AWSS3DataLoader._get_default_retry_config()\n )\n self.credentials = AWSS3Credentials(**kwargs)\n self.attempt_exception_types = [EndpointConnectionError]\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"credentials\"] = RetryParams(\n max_attempts=settings.S3_READ_CREDENTIALS_ATTEMPTS,\n wait_time_seconds=settings.S3_READ_WAIT_TIME_SECONDS,\n )\n config[\"download_data\"] = RetryParams(\n max_attempts=settings.S3_DOWNLOAD_DATA_ATTEMPTS,\n wait_time_seconds=settings.S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS,\n )\n return config\n\n @retry_method(name=\"download_data\")\n def _read_from_s3(self, client, bucket: str, file: str) -> Image:\n return read_from_s3(client, bucket, file)\n\n @retry_method(name=\"credentials\")\n def _get_client(self, task_id: str):\n if (\n self.credentials.aws_access_key_id is None\n or self.credentials.aws_secret_access_key is None\n ) and not self.credentials.use_system_info:\n logger.warning(\n \"No specific AWS credentials, use Anonymous session\"\n )\n s3_client = boto3.client(\n \"s3\", config=Config(signature_version=UNSIGNED)\n )\n else:\n sts_client = boto3.client(\n \"sts\",\n aws_access_key_id=self.credentials.aws_access_key_id,\n aws_secret_access_key=self.credentials.aws_secret_access_key,\n )\n if self.credentials.external_id:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n ExternalId=self.credentials.external_id,\n )\n else:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n )\n credentials = assumed_role_object[\"Credentials\"]\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"],\n )\n return s3_client\n\n def _generate_dataset_from_s3(\n self, files: List[S3FileMeta]\n ) -> Iterable[Dict]:\n if len(files) == 0:\n logger.warning(\"Nothing to download\")\n else:\n logger.info(\"Connecting to aws s3...\")\n task_id: str = str(uuid.uuid4())\n try:\n s3_client = self._get_client(task_id)\n logger.info(\"Start downloading data from S3...\")\n bad_items_count = 0\n for val in files:\n image = None\n try:\n image: Image = read_from_s3(\n s3_client, val.bucket, val.file\n )\n except Exception as e:\n logger.exception(\n f\"Unable to download an item: {val.bucket}/{val.file} Exception: {str(e)}\"\n )\n\n if image is None:\n logger.error(\n f\"Unable to download {val.file} from {val.bucket}\"\n )\n bad_items_count += 1\n continue\n yield {\"item\": image, \"item_id\": val.id}\n\n if bad_items_count == len(files):\n raise FailedToLoadAnythingFromAWSS3()\n\n except Exception as err:\n 
logger.error(f\"Failed to load dataset from s3: {err}\")\n raise err\n\n def load(self, items_data: List[S3FileMeta]) -> Dataset:\n return Dataset.from_generator(\n lambda: self._generate_dataset_from_s3(items_data)\n )" }, { "identifier": "CLIPItemStorageProducer", "path": "embedding_studio/embeddings/data/storages/producers/clip.py", "snippet": "class CLIPItemStorageProducer(ItemStorageProducer):\n def __init__(\n self,\n field_normalizer: DatasetFieldsNormalizer,\n id_field_name: Optional[str] = None,\n ):\n super(CLIPItemStorageProducer, self).__init__(\n ImageItemsDatasetDictPreprocessor(field_normalizer, 224),\n id_field_name,\n )" }, { "identifier": "DatasetFieldsNormalizer", "path": "embedding_studio/embeddings/data/utils/fields_normalizer.py", "snippet": "class DatasetFieldsNormalizer:\n ID_FIELD_NAME = \"item_id\"\n ITEM_FIELD_NAME = \"item\"\n\n def __init__(self, item_field_name: str, id_field_name: str):\n \"\"\"Unify column names in DatasetDict, so it can be used in fine-tuning script.\n A dataset should have ID column, related to ID in clickstream.\n\n :param item_field_name: name of column with items.\n :param id_field_name: name of ID column\n \"\"\"\n if not id_field_name:\n raise ValueError(\"id_field_name should be non-empty string\")\n self.id_field_name = id_field_name\n\n if not item_field_name:\n raise ValueError(\"item_field_name should be non-empty string\")\n self.item_field_name = item_field_name\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n id_normalizer = (\n lambda id_value: str(id_value.item())\n if (\n isinstance(id_value, Tensor)\n or isinstance(id_value, FloatTensor)\n )\n else str(id_value)\n )\n for key in dataset.keys():\n if (\n DatasetFieldsNormalizer.ID_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.id_field_name, DatasetFieldsNormalizer.ID_FIELD_NAME\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ID_FIELD_NAME} field\"\n )\n\n if (\n DatasetFieldsNormalizer.ITEM_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.item_field_name,\n DatasetFieldsNormalizer.ITEM_FIELD_NAME,\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ITEM_FIELD_NAME} field\"\n )\n\n return dataset.map(\n lambda example: {\n DatasetFieldsNormalizer.ID_FIELD_NAME: id_normalizer(\n example[DatasetFieldsNormalizer.ID_FIELD_NAME]\n )\n }\n )" }, { "identifier": "CosineProbMarginRankingLoss", "path": "embedding_studio/embeddings/losses/prob_cosine_margin_ranking_loss.py", "snippet": "class CosineProbMarginRankingLoss(ProbMarginRankingLoss):\n def __init__(self, base_margin: Optional[float] = 1.0):\n \"\"\"Embeddings Fine-tuning Loss (modification of MarginRankingLoss)\n Use sigmoid instead of ReLU + results confidences to ignore noises and mistakes.\n Adapt to cosine similarity / distance\n\n :param base_margin: margin ranking loss margin (default: 1.0)\n \"\"\"\n super(CosineProbMarginRankingLoss, self).__init__(\n base_margin=base_margin\n )\n\n def __adjust(self, adjusted_diff: FloatTensor) -> FloatTensor:\n # The way any wrong difference more than 0.01 is worth to be penaltized\n # Sigmoid with this kind of input return prob > 0.1, for difference between\n # pos and more than 0.001. 
That's our expected behaviour.\n # TODO: implement calculation of magic numbers\n return -400 * adjusted_diff + 6" }, { "identifier": "TextToImageCLIPModel", "path": "embedding_studio/embeddings/models/text_to_image/clip.py", "snippet": "class TextToImageCLIPModel(EmbeddingsModelInterface):\n def __init__(self, clip_model: SentenceTransformer):\n \"\"\"Wrapper to SentenceTransformer CLIP model.\n Usage: model = TextToImageCLIPModel(SentenceTransformer('clip-ViT-B-32'))\n\n :param clip_model: clip model from SentenceTransformer package\n \"\"\"\n super(TextToImageCLIPModel, self).__init__(same_query_and_items=False)\n self.clip_model = clip_model\n self.text_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_projection\"],\n )\n\n self.vision_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"vision_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"visual_projection\"],\n )\n\n def get_query_model_params(self) -> Iterator[Parameter]:\n return self.text_model.parameters()\n\n def get_items_model_params(self) -> Iterator[Parameter]:\n return self.vision_model.parameters()\n\n def fix_query_model(self, num_fixed_layers: int):\n if (\n len(self.text_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.text_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.text_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n if i < num_fixed_layers:\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_query_model(self):\n self.text_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def fix_item_model(self, num_fixed_layers: int):\n if (\n len(self.vision_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.vision_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.vision_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n if i < num_fixed_layers:\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_item_model(self):\n self.vision_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def tokenize(self, query: str) -> List[Dict]:\n return self.clip_model.tokenize([query])\n\n def forward_query(self, query: str) -> FloatTensor:\n if len(query) == 0:\n logger.warning(\"Provided query is empty\")\n\n tokenized = self.tokenize(query)\n return self.text_model.forward(tokenized[\"input_ids\"].to(self.device))\n\n def forward_items(self, items: List[np.array]) -> FloatTensor:\n if len(items) == 0:\n raise ValueError(\"items list must not be empty\")\n\n return 
self.vision_model.forward(torch.stack(items).to(self.device))" }, { "identifier": "SessionWithEvents", "path": "embedding_studio/models/clickstream/sessions.py", "snippet": "class SessionWithEvents(RegisteredSession):\n events: List[SessionEvent]" }, { "identifier": "FineTuningBuilder", "path": "embedding_studio/models/plugin.py", "snippet": "class FineTuningBuilder:\n data_loader: DataLoader\n query_retriever: QueryRetriever\n clickstream_parser: ClickstreamParser\n clickstream_sessions_splitter: ClickstreamSessionsSplitter\n dataset_fields_normalizer: DatasetFieldsNormalizer\n item_storage_producer: ItemStorageProducer\n accumulators: List[MetricsAccumulator]\n experiments_manager: ExperimentsManager\n fine_tuning_settings: FineTuningSettings\n initial_params: Dict[str, List[Any]]\n ranking_data: RankingData\n initial_max_evals: int = 100" }, { "identifier": "PluginMeta", "path": "embedding_studio/models/plugin.py", "snippet": "class PluginMeta(BaseModel):\n name: str\n version: str = \"1.0.0\"\n description: Optional[str] = None" }, { "identifier": "prepare_data", "path": "embedding_studio/workers/fine_tuning/data/prepare_data.py", "snippet": "def prepare_data(\n clickstream_sessions: List[Union[Dict, SessionWithEvents]],\n parser: ClickstreamParser,\n clickstream_splitter: ClickstreamSessionsSplitter,\n query_retriever: QueryRetriever,\n loader: DataLoader,\n storage_producer: ItemStorageProducer,\n) -> RankingData:\n \"\"\"Prepare fine-tuning data.\n\n :param clickstream_sessions: clickstream sessions\n :param parser: how to parse a clickstream session\n :param clickstream_splitter: how to split clickstream sessions\n :param query_retriever: retrieve query item\n :param loader: load items data\n :param storage_producer: get train/test datasets\n :return: train / test clickstream sessiobs and dataset dict\n \"\"\"\n if len(clickstream_sessions) == 0:\n raise ValueError(\"Empty clickstream sessions list\")\n\n logger.info(\"Parse clickstream sessions data\")\n raw_clickstream_sessions: List[RawClickstreamSession] = [\n (\n parser.parse(session)\n if isinstance(session, dict)\n else parser.parse_from_mongo(session)\n )\n for session in clickstream_sessions\n ]\n\n clickstream_sessions: List[ClickstreamSession] = [\n r.get_session() for r in raw_clickstream_sessions\n ]\n\n logger.info(\"Setup query retriever\")\n query_retriever.setup(clickstream_sessions)\n\n logger.info(\"Split clickstream sessions into train / test\")\n clickstream_dataset = clickstream_splitter.split(clickstream_sessions)\n logger.info(\n f'Splitting is finished, train: {len(clickstream_dataset[\"train\"])} / test: {len(clickstream_dataset[\"test\"])}'\n )\n\n logger.info(\"Get list of files to be loaded\")\n files_to_load: Set[ItemMeta] = set()\n for session in raw_clickstream_sessions:\n files_to_load.update(set([r.item for r in session.results]))\n\n if len(files_to_load) == 0:\n raise ValueError(\"Empty clickstream sessions\")\n\n logger.info(\"Download files and prepare DataDict of ItemStorage values\")\n files_to_load: List[ItemMeta] = list(files_to_load)\n\n dataset: DatasetDict = storage_producer(\n loader.load(files_to_load), clickstream_dataset\n )\n\n return RankingData(clickstream_dataset, dataset)" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int 
= 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n 
wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n 
logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return 
self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n 
else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with 
mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: 
pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "INITIAL_PARAMS", "path": "embedding_studio/workers/fine_tuning/experiments/initial_params/clip.py", "snippet": "INITIAL_PARAMS: Dict[str, List[Union[int, float]]] = {\n \"num_fixed_layers\": [5, 6, 7, 8],\n \"query_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"items_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"query_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"items_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"margin\": [0.01, 0.025, 0.05],\n}" }, { "identifier": "MetricsAccumulator", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricsAccumulator:\n def __init__(\n self,\n name: str,\n calc_mean: bool = 
False,\n calc_sliding: bool = False,\n calc_min: bool = False,\n calc_max: bool = False,\n window_size: int = 10,\n ):\n \"\"\"Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.\n\n :param name: metric name (metrics with other name will be ignored)\n :param calc_mean: should accumulator calculate mean value (default: False)\n :param calc_sliding: should accumulator calculate sliding mean value (default: False)\n :param calc_min: should accumulator calculate min value (default: False)\n :param calc_max: should accumulator calculate max value (default: False)\n :param window_size: size of sliding window (default: 10)\n \"\"\"\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricsAccumulator's name should not be empty\")\n\n self._name = name\n\n if not isinstance(calc_mean, bool):\n raise ValueError(\"calc_mean value should be bool\")\n self._calc_mean = calc_mean\n\n if not isinstance(calc_sliding, bool):\n raise ValueError(\"calc_sliding value should be bool\")\n self._calc_sliding = calc_sliding\n\n if not isinstance(calc_min, bool):\n raise ValueError(\"calc_min value should be bool\")\n self._calc_min = calc_min\n\n if not isinstance(calc_max, bool):\n raise ValueError(\"calc_max value should be bool\")\n self._calc_max = calc_max\n\n if not isinstance(window_size, int) or window_size <= 1:\n raise ValueError(\n \"window_size value should be integer with value more than 1\"\n )\n\n self._window_size = window_size\n self._values = []\n\n @property\n def name(self) -> str:\n return self._name\n\n def clear(self):\n \"\"\"Clear accumulator\"\"\"\n self._values = []\n\n def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:\n \"\"\"Add metric value to an accumulator.\n\n :param value: metric to be accumulated\n :return: aggregations\n \"\"\"\n if self.name == value.name:\n self._values.append(value.value)\n\n return self.aggregate()\n\n return []\n\n def aggregate(self) -> List[Tuple[str, float]]:\n \"\"\"Aggregate accumulated metrics\n\n :return: metric aggregations (last, mean, sliding, min, max)\n \"\"\"\n aggregations: List[Tuple[str, float]] = []\n if len(self._values) > 0:\n aggregations.append((self.name, self._values[-1]))\n if self._calc_mean:\n aggregations.append(\n (f\"mean_{self.name}\", float(np.mean(self._values)))\n )\n\n if self._calc_sliding:\n slide_value = float(\n np.mean(self._values)\n if len(self._values) < self._window_size\n else np.mean(self._values[-self._window_size :])\n )\n aggregations.append((f\"sliding_{self.name}\", slide_value))\n\n if self._calc_min:\n aggregations.append((f\"min_{self.name}\", np.min(self._values)))\n\n if self._calc_max:\n aggregations.append((f\"max_{self.name}\", np.max(self._values)))\n\n return aggregations" } ]
from typing import List from sentence_transformers import SentenceTransformer from embedding_studio.core.config import settings from embedding_studio.core.plugin import FineTuningMethod from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import ( AWSS3ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.search_event import ( DummyEventType, SearchResult, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.clickstream.text_query_item import ( TextQueryItem, ) from embedding_studio.embeddings.data.clickstream.text_query_retriever import ( TextQueryRetriever, ) from embedding_studio.embeddings.data.loaders.s3.s3_loader import ( AWSS3DataLoader, ) from embedding_studio.embeddings.data.storages.producers.clip import ( CLIPItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import ( CosineProbMarginRankingLoss, ) from embedding_studio.embeddings.models.text_to_image.clip import ( TextToImageCLIPModel, ) from embedding_studio.models.clickstream.sessions import SessionWithEvents from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import ( INITIAL_PARAMS, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
16,901
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser( TextQueryItem, SearchResult, DummyEventType ) self.splitter = ClickstreamSessionsSplitter() self.normalizer = DatasetFieldsNormalizer("item", "item_id") self.storage_producer = CLIPItemStorageProducer(self.normalizer) self.accumulators = [ MetricsAccumulator("train_loss", True, True, True, True), MetricsAccumulator( "train_not_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator( "train_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator("test_loss"), MetricsAccumulator("test_not_irrelevant_dist_shift"), MetricsAccumulator("test_irrelevant_dist_shift"), ] self.manager = ExperimentsManager( tracking_uri=settings.MLFLOW_TRACKING_URI, main_metric="test_not_irrelevant_dist_shift", accumulators=self.accumulators, ) self.initial_params = INITIAL_PARAMS self.initial_params.update( { "not_irrelevant_only": [True], "negative_downsampling": [ 0.5, ], "examples_order": [ [ 11, ] ], } ) self.settings = FineTuningSettings( loss_func=CosineProbMarginRankingLoss(), step_size=35, test_each_n_sessions=0.5, num_epochs=3, ) def upload_initial_model(self) -> None: model = TextToImageCLIPModel(SentenceTransformer("clip-ViT-B-32")) self.manager.upload_initial_model(model) def get_fine_tuning_builder( self, clickstream: List[SessionWithEvents]
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser( TextQueryItem, SearchResult, DummyEventType ) self.splitter = ClickstreamSessionsSplitter() self.normalizer = DatasetFieldsNormalizer("item", "item_id") self.storage_producer = CLIPItemStorageProducer(self.normalizer) self.accumulators = [ MetricsAccumulator("train_loss", True, True, True, True), MetricsAccumulator( "train_not_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator( "train_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator("test_loss"), MetricsAccumulator("test_not_irrelevant_dist_shift"), MetricsAccumulator("test_irrelevant_dist_shift"), ] self.manager = ExperimentsManager( tracking_uri=settings.MLFLOW_TRACKING_URI, main_metric="test_not_irrelevant_dist_shift", accumulators=self.accumulators, ) self.initial_params = INITIAL_PARAMS self.initial_params.update( { "not_irrelevant_only": [True], "negative_downsampling": [ 0.5, ], "examples_order": [ [ 11, ] ], } ) self.settings = FineTuningSettings( loss_func=CosineProbMarginRankingLoss(), step_size=35, test_each_n_sessions=0.5, num_epochs=3, ) def upload_initial_model(self) -> None: model = TextToImageCLIPModel(SentenceTransformer("clip-ViT-B-32")) self.manager.upload_initial_model(model) def get_fine_tuning_builder( self, clickstream: List[SessionWithEvents]
) -> FineTuningBuilder:
14
2023-10-31 00:33:13+00:00
24k
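For orientation before the next record begins: this record's completion target is the body of `get_fine_tuning_builder` (the `next_line` field above only closes the method signature). The following is a minimal sketch, not the repository's actual completion, of how such a body could be assembled using only names already present in this record's context and import block (`prepare_data`, `FineTuningBuilder`, and the attributes set in `__init__` of the `cropped_code` class). It assumes `FineTuningBuilder` accepts its declared fields as keyword arguments and that the surrounding class definition is in scope.

    def get_fine_tuning_builder(
        self, clickstream: List[SessionWithEvents]
    ) -> FineTuningBuilder:
        # Convert raw clickstream sessions into train/test ranking data,
        # reusing the parser, splitter, retriever, loader and storage
        # producer configured in __init__ (argument order follows the
        # prepare_data signature quoted in this record's context).
        ranking_data = prepare_data(
            clickstream,
            self.parser,
            self.splitter,
            self.retriever,
            self.data_loader,
            self.storage_producer,
        )
        # Bundle every component the fine-tuning worker needs.
        return FineTuningBuilder(
            data_loader=self.data_loader,
            query_retriever=self.retriever,
            clickstream_parser=self.parser,
            clickstream_sessions_splitter=self.splitter,
            dataset_fields_normalizer=self.normalizer,
            item_storage_producer=self.storage_producer,
            accumulators=self.accumulators,
            experiments_manager=self.manager,
            fine_tuning_settings=self.settings,
            initial_params=self.initial_params,
            ranking_data=ranking_data,
            initial_max_evals=100,  # default value per the FineTuningBuilder snippet
        )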
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n\n if isinstance(agent_idxs, str):\n if \"*\" in agent_idxs:\n self.agent_idxs = np.arange(pop.n_agents)\n else:\n self.agent_idxs = \\\n np.array([int(x) for x in agent_idxs.split(',')])\n else:\n self.agent_idxs = agent_idxs # assume array\n\n assert np.max(self.agent_idxs) < pop.n_agents, \\\n 'Agent index is out of bounds.'\n\n if isinstance(env_names, str):\n env_names = [\n x.strip() for x in env_names.split(',')\n ]\n\n self.n_episodes = n_episodes\n env_infos = create_envs_for_kwargs(env_names, env_kwargs)\n env_names = []\n self.ext_env_names = []\n env_kwargs = []\n for (name, ext_name, kwargs) in env_infos:\n env_names.append(name)\n self.ext_env_names.append(ext_name)\n env_kwargs.append(kwargs)\n self.n_envs = len(env_names)\n\n self.benvs = []\n self.env_params = []\n self.env_has_solved_rate = []\n for env_name, kwargs in zip(env_names, env_kwargs):\n benv = envs.BatchEnv(\n env_name=env_name,\n n_parallel=n_episodes,\n n_eval=1,\n env_kwargs=kwargs,\n wrappers=['monitor_return', 'monitor_ep_metrics']\n )\n self.benvs.append(benv)\n self.env_params.append(benv.env.params)\n self.env_has_solved_rate.append(benv.env.eval_solved_rate is not None)\n\n self.action_dtype = self.benvs[0].env.action_space().dtype\n\n monitored_metrics = self.benvs[0].env.get_monitored_metrics()\n self.rolling_stats = RollingStats(names=monitored_metrics, window=1)\n self._update_ep_stats = jax.vmap(\n jax.vmap(\n self.rolling_stats.update_stats, in_axes=(0,0,0,None)),\n in_axes=(0,0,0,None))\n\n self.test_return_pre = 'test_return'\n self.test_solved_rate_pre = 'test_solved_rate'\n\n self.render_mode = render_mode\n if render_mode:\n from minimax.envs.viz.grid_viz import GridVisualizer\n self.viz = GridVisualizer()\n self.viz.show()\n\n if render_mode == 'ipython':\n from IPython import display\n self.ipython_display = display\n\n def load_checkpoint_state(self, runner_state, state):\n runner_state = list(runner_state)\n runner_state[1] = runner_state[1].load_state_dict(state[1])\n\n return tuple(runner_state)\n\n @partial(jax.jit, static_argnums=(0,2))\n def _get_transition(\n self,\n rng,\n benv,\n params,\n state,\n obs,\n carry,\n zero_carry,\n extra):\n value, pi_params, next_carry = self.pop.act(params, obs, carry)\n pi = self.pop.get_action_dist(pi_params, dtype=self.action_dtype)\n rng, subrng = jax.random.split(rng)\n action = pi.sample(seed=subrng)\n log_pi = pi.log_prob(action)\n\n rng, *vrngs = jax.random.split(rng, self.pop.n_agents+1)\n\n step_args = (jnp.array(vrngs), state, action, extra)\n (next_obs, \n next_state, \n reward, \n done, \n info, \n extra) = benv.step(*step_args)\n\n # Add transition to storage\n step = (obs, action, reward, done, log_pi, value)\n if carry is not None:\n step += (carry,)\n\n # Zero carry if needed\n if carry is not None:\n next_carry = jax.vmap(_tree_util.pytree_select)(\n done, zero_carry, next_carry)\n\n if self.render_mode:\n self.viz.render(\n benv.env.params, \n jax.tree_util.tree_map(lambda x: x[0][0], state))\n if self.render_mode == 'ipython':\n self.ipython_display.display(self.viz.window.fig)\n self.ipython_display.clear_output(wait=True)\n\n return next_state, next_obs, next_carry, done, info, extra\n\n @partial(jax.jit, static_argnums=(0, 2))\n def _rollout_benv(\n self, \n rng, \n benv,\n 
params,\n env_params,\n state,\n obs,\n carry,\n zero_carry,\n extra,\n ep_stats):\n\n def _scan_rollout(scan_carry, rng):\n (state, \n obs, \n carry,\n extra, \n ep_stats) = scan_carry\n \n step = \\\n self._get_transition(\n rng,\n benv,\n params, \n state, \n obs, \n carry, \n zero_carry,\n extra)\n\n (next_state, \n next_obs, \n next_carry, \n done, \n info, \n extra) = step\n\n ep_stats = self._update_ep_stats(ep_stats, done, info, 1)\n\n return (next_state, next_obs, next_carry, extra, ep_stats), None\n\n n_steps = benv.env.max_episode_steps()\n rngs = jax.random.split(rng, n_steps)\n (state, \n obs, \n carry, \n extra,\n ep_stats),_ = jax.lax.scan(\n _scan_rollout,\n (state, obs, carry, extra, ep_stats),\n rngs,\n length=n_steps)\n\n return ep_stats\n\n @partial(jax.jit, static_argnums=(0,))\n def run(self, rng, params):\n \"\"\"\n Rollout agents on each env. \n\n For each env, run n_eval episodes in parallel, \n where each is indexed to return in order.\n \"\"\"\n eval_stats = self.fake_run(rng, params)\n rng, *rollout_rngs = jax.random.split(rng, self.n_envs+1)\n for i, (benv, env_param) in enumerate(zip(self.benvs, self.env_params)):\n rng, *reset_rngs = jax.random.split(rng, self.pop.n_agents+1)\n obs, state, extra = benv.reset(jnp.array(reset_rngs))\n\n if self.pop.agent.is_recurrent:\n rng, subrng = jax.random.split(rng)\n zero_carry = self.pop.init_carry(subrng, obs)\n else:\n zero_carry = None\n\n # Reset episodic stats\n ep_stats = self.rolling_stats.reset_stats(\n batch_shape=(self.pop.n_agents, self.n_episodes))\n\n ep_stats = self._rollout_benv(\n rollout_rngs[i],\n benv,\n jax.lax.stop_gradient(params), \n env_param, \n state, \n obs,\n zero_carry,\n zero_carry,\n extra,\n ep_stats)\n \n env_name = self.ext_env_names[i]\n mean_return = ep_stats['return'].mean(1)\n\n if self.env_has_solved_rate[i]:\n mean_solved_rate = jax.vmap(jax.vmap(benv.env.eval_solved_rate))(ep_stats).mean(1)\n\n for idx in self.agent_idxs:\n eval_stats[f'eval/a{idx}:{self.test_return_pre}:{env_name}'] = mean_return[idx].squeeze()\n if self.env_has_solved_rate[i]:\n eval_stats[f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}'] = mean_solved_rate[idx].squeeze()\n\n return eval_stats\n\n def fake_run(self, rng, params):\n eval_stats = {}\n for i, env_name in enumerate(self.ext_env_names):\n for idx in self.agent_idxs:\n eval_stats.update({\n f'eval/a{idx}:{self.test_return_pre}:{env_name}':0.\n })\n if self.env_has_solved_rate[i]:\n eval_stats.update({\n f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}':0.,\n })\n\n return eval_stats" }, { "identifier": "DRRunner", "path": "src/minimax/runners/dr_runner.py", "snippet": "class DRRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students. 
\n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchEnv: Manages environment step and reset logic, using a \n\t\tpopulaton of agents.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tstudent_agents,\n\t\tn_students=1,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=256,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tnormalize_return=False,\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\tn_devices=1,\n\t\trender=False):\n\n\t\tassert len(student_agents) == 1, 'Only one type of student supported.'\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.normalize_return = normalize_return\n\t\tself.track_env_metrics = track_env_metrics\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tself.env, self.env_params = envs.make(\n\t\t\tenv_name, \n\t\t\tenv_kwargs=env_kwargs\n\t\t)\n\t\tself._action_shape = self.env.action_space().shape\n\n\t\tself.benv = envs.BatchEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics']\n\t\t)\n\t\tself.action_dtype = self.benv.env.action_space().dtype\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.env.action_space(),\n\t\t\tobs_space=self.env.observation_space(),\n\t\t\tagent=self.student_pop.agent,\n\t\t)\n\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trngs, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), n_parallel=n_parallel)\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.student_pop.init_carry(subrng, obs)\n\t\t\tself.zero_carry = jax.tree_map(lambda x: x.at[:,:self.n_parallel].get(), carry)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = self.student_pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(self.lr),\n\t\t\tend_value=-float(self.lr_final),\n\t\t\ttransition_steps=self.lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.adam(learning_rate=float(self.lr), eps=self.adam_eps)\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=self.student_pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, n_parallel*self.n_eval))\n\n\t\tstart_state = state\n\n\t\treturn (\n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate,\n\t\t\tstart_state, # Used to track metrics from starting state\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2))\n\tdef _get_transition(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tparams, \n\t\trollout, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\n\t\tpi = pop.get_action_dist(pi_params, dtype=self.action_dtype)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\t(next_obs, \n\t\t next_state, \n\t\t reward, \n\t\t done, \n\t\t info, \n\t\t extra) = self.benv.step(jnp.array(vrngs), state, action, extra)\n\n\t\tnext_start_state = jax.vmap(_tree_util.pytree_select)(\n\t\t\tdone, next_state, start_state\n\t\t)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = self.student_rollout.append(rollout, *step)\n\n\t\tif self.render:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn (\n\t\t\trollout, \n\t\t\tnext_state,\n\t\t\tnext_start_state, \n\t\t\tnext_obs, \n\t\t\tnext_carry, \n\t\t\tdone, \n\t\t\tinfo, \n\t\t\textra\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _rollout_students(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None, \n\t\tep_stats=None):\n\t\trollout = self.student_rollout.reset()\n\n\t\trngs = jax.random.split(rng, self.n_rollout_steps)\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\trollout, state, start_state, obs, carry, done, extra, ep_stats, train_state = scan_carry \n\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng, \n\t\t\t\t\tself.student_pop, \n\t\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\t\trollout, \n\t\t\t\t\tstate,\n\t\t\t\t\tstart_state, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry,\n\t\t\t\t\tdone, \n\t\t\t\t\textra)\n\t\t\t(rollout, \n\t\t\t next_state,\n\t\t\t next_start_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tep_stats = self._update_ep_stats(ep_stats, done, info)\n\n\t\t\treturn (\n\t\t\t\trollout, \n\t\t\t\tnext_state,\n\t\t\t\tnext_start_state,\n\t\t\t\tnext_obs, \n\t\t\t\tnext_carry,\n\t\t\t\tdone,\n\t\t\t\textra, 
\n\t\t\t\tep_stats,\n\t\t\t\ttrain_state), None\n\n\t\t(rollout, \n\t\t state, \n\t\t start_state, \n\t\t obs, \n\t\t carry, \n\t\t done,\n\t\t extra, \n\t\t ep_stats,\n\t\t train_state), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t start_state,\n\t\t\t obs, \n\t\t\t carry, \n\t\t\t done,\n\t\t\t extra, \n\t\t\t ep_stats,\n\t\t\t train_state),\n\t\t\trngs,\n\t\t\tlength=self.n_rollout_steps,\n\t\t)\n\n\t\treturn rollout, state, start_state, obs, carry, extra, ep_stats, train_state\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None):\n\t\tstats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tstats.update(update_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], stats) # for agent0\n\t\t\t\t_stats.update({f'a{i}/{k}':v for k,v in _student_stats.items()})\n\t\t\tstats = _stats\n\n\t\tif self.track_env_metrics:\n\t\t\tmean_env_metrics = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(env_metrics)\n\t\t\tmean_env_metrics = {f'env/{k}':v for k,v in mean_env_metrics.items()}\n\n\t\t\tif self.n_students > 1:\n\t\t\t\t_env_metrics = {}\n\t\t\t\tfor i in range(self.n_students):\n\t\t\t\t\t_student_env_metrics = jax.tree_util.tree_map(lambda x: x[i], mean_env_metrics) # for agent0\n\t\t\t\t\t_env_metrics.update({f'{k}_a{i}':v for k,v in _student_env_metrics.items()})\n\t\t\t\tmean_env_metrics = _env_metrics\n\n\t\t\tstats.update(mean_env_metrics)\n\n\t\tif self.n_students == 1:\n\t\t\tstats = jax.tree_map(lambda x: x[0], stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\t\"\"\"\n\t\tPerform one update step: rollout all students and teachers + update with PPO\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\t# PPOAgent vmaps over 
the train state and batch. Batch must be N x EM\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# Collect env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(rollout_start_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)" }, { "identifier": "PAIREDRunner", "path": "src/minimax/runners/paired_runner.py", "snippet": "class PAIREDRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students and teachers. \n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchUEDEnv: Manages environment step and reset logic for a \n\t\tpopulation of agents batched over a pair of student and \n\t\tteacher MDPs.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tued_env_kwargs,\n\t\tstudent_agents,\n\t\tn_students=2,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=250,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tteacher_lr=None,\n\t\tteacher_lr_final=None,\n\t\tteacher_lr_anneal_steps=None,\n\t\tteacher_discount=0.99,\n\t\tteacher_gae_lambda=0.95,\n\t\tteacher_agents=None,\n\t\tued_score='relative_regret',\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\trender=False,\n\t\tn_devices=1):\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tued_score = UEDScore[ued_score.upper()]\n\n\t\tassert len(student_agents) == 1, \\\n\t\t\t'Only one type of student supported.'\n\t\tassert not (n_students > 2 and ued_score in [UEDScore.RELATIVE_REGRET, UEDScore.MEAN_RELATIVE_REGRET]), \\\n\t\t\t'Standard PAIRED uses only 2 students.'\n\t\tassert teacher_agents is None or len(teacher_agents) == 1, \\\n\t\t\t'Only one type of teacher supported.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.teacher_lr = \\\n\t\t\tlr if teacher_lr is None else lr\n\t\tself.teacher_lr_final = \\\n\t\t\tself.lr_final if teacher_lr_final is None else teacher_lr_final\n\t\tself.teacher_lr_anneal_steps = \\\n\t\t\tlr_anneal_steps if teacher_lr_anneal_steps is None else teacher_lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.ued_score = ued_score\n\t\tself.track_env_metrics = track_env_metrics\n\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tif teacher_agents is not None:\n\t\t\tself.teacher_pop = AgentPop(teacher_agents[0], n_agents=1)\n\n\t\t# This 
ensures correct partial-episodic bootstrapping by avoiding\n\t\t# any termination purely due to timeouts.\n\t\t# env_kwargs.max_episode_steps = self.n_rollout_steps + 1\n\t\tself.benv = envs.BatchUEDEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\tued_env_kwargs=ued_env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics'],\n\t\t\tued_wrappers=[]\n\t\t)\n\t\tself.teacher_n_rollout_steps = \\\n\t\t\tself.benv.env.ued_max_episode_steps()\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.benv.env.action_space(),\n\t\t\tobs_space=self.benv.env.observation_space(),\n\t\t\tagent=self.student_pop.agent\n\t\t)\n\n\t\tself.teacher_rollout = RolloutStorage(\n\t\t\tdiscount=teacher_discount,\n\t\t\tgae_lambda=teacher_gae_lambda,\n\t\t\tn_steps=self.teacher_n_rollout_steps,\n\t\t\tn_agents=1,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=1,\n\t\t\taction_space=self.benv.env.ued_action_space(),\n\t\t\tobs_space=self.benv.env.ued_observation_space(),\n\t\t\tagent=self.teacher_pop.agent,\n\t\t)\n\n\t\tued_monitored_metrics = ('return',)\n\t\tself.ued_rolling_stats = RollingStats(\t\n\t\t\tnames=ued_monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\t\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\t\tself._update_ued_ep_stats = jax.vmap(jax.vmap(self.ued_rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trng, student_rng, teacher_rng = jax.random.split(rng,3)\n\t\tstudent_info = self._reset_pop(\n\t\t\t\tstudent_rng, \n\t\t\t\tself.student_pop, \n\t\t\t\tpartial(self.benv.reset, sub_batch_size=n_parallel*self.n_eval),\n\t\t\t\tn_parallel_ep=n_parallel*self.n_eval,\n\t\t\t\tlr_init=self.lr,\n\t\t\t\tlr_final=self.lr_final,\n\t\t\t\tlr_anneal_steps=self.lr_anneal_steps)\n\n\t\tteacher_info = self._reset_pop(\n\t\t\tteacher_rng, \n\t\t\tself.teacher_pop, \n\t\t\tpartial(self.benv.reset_teacher, n_parallel=n_parallel),\n\t\t\tn_parallel_ep=n_parallel,\n\t\t\tlr_init=self.teacher_lr,\n\t\t\tlr_final=self.teacher_lr_final,\n\t\t\tlr_anneal_steps=self.teacher_lr_anneal_steps)\n\n\t\treturn (\n\t\t\trng,\n\t\t\t*student_info,\n\t\t\t*teacher_info\n\t\t)\n\n\tdef _reset_pop(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tenv_reset_fn, \n\t\tn_parallel_ep=1,\n\t\tlr_init=3e-4,\n\t\tlr_final=3e-4,\n\t\tlr_anneal_steps=0):\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\t\treset_out = env_reset_fn(jnp.array(vrngs))\n\t\tif len(reset_out) == 2:\n\t\t\tobs, state = reset_out\n\t\telse:\n\t\t\tobs, state, extra = reset_out\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif pop.agent.is_recurrent:\n\t\t\tcarry = pop.init_carry(subrng, obs)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(lr_init),\n\t\t\tend_value=-float(lr_final),\n\t\t\ttransition_steps=lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.scale_by_adam(eps=self.adam_eps),\n\t\t\toptax.scale_by_schedule(schedule_fn),\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\t\t\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(pop.n_agents,n_parallel_ep))\n\n\t\treturn train_state, state, obs, carry, ep_stats\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\t\t_state[6] = state[6].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\t\trunner_state[6] = runner_state[6].load_state_dict(state[6])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2,3))\n\tdef _get_transition(\n\t\tself,\n\t\trng, \n\t\tpop, \n\t\trollout_mgr, \n\t\trollout, \n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\t\tpi = pop.get_action_dist(pi_params)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\n\t\tif pop is self.student_pop:\n\t\t\tstep_fn = self.benv.step_student\n\t\telse:\n\t\t\tstep_fn = self.benv.step_teacher\n\t\tstep_args = (jnp.array(vrngs), state, action)\n\n\t\tif reset_state is not None: # Needed for student to reset to same instance\n\t\t\tstep_args += (reset_state,)\n\n\t\tif extra is not None:\n\t\t\tstep_args += (extra,)\n\t\t\tnext_obs, next_state, reward, done, info, extra = step_fn(*step_args)\n\t\telse:\n\t\t\tnext_obs, next_state, reward, done, info = step_fn(*step_args)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = rollout_mgr.append(rollout, *step)\n\n\t\tif self.render and pop is self.student_pop:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn rollout, next_state, next_obs, next_carry, done, info, extra\n\n\t@partial(jax.jit, static_argnums=(0,2,3,4))\n\tdef _rollout(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\trollout_mgr,\n\t\tn_steps,\n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\trngs = jax.random.split(rng, n_steps)\n\n\t\trollout = rollout_mgr.reset()\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t obs, \n\t\t\t carry,\n\t\t\t done, \n\t\t\t extra, \n\t\t\t ep_stats) = scan_carry\n\t\t\t\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng,\n\t\t\t\t\tpop, \n\t\t\t\t\trollout_mgr,\n\t\t\t\t\trollout,\n\t\t\t\t\tparams, \n\t\t\t\t\tstate, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry, \n\t\t\t\t\tdone,\n\t\t\t\t\treset_state, \n\t\t\t\t\textra)\n\n\t\t\t(rollout, \n\t\t\t next_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tif ep_stats is not None:\n\t\t\t\t_ep_stats_update_fn = self._update_ep_stats \\\n\t\t\t\t\tif pop 
is self.student_pop else self._update_ued_ep_stats\n\n\t\t\t\tep_stats = _ep_stats_update_fn(ep_stats, done, info)\n\n\t\t\treturn (rollout, next_state, next_obs, next_carry, done, extra, ep_stats), None\n\n\t\t(rollout, state, obs, carry, done, extra, ep_stats), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, state, obs, carry, done, extra, ep_stats),\n\t\t\trngs,\n\t\t\tlength=n_steps,\n\t\t\tunroll=self.n_unroll_rollout\n\t\t)\n\n\t\treturn rollout, state, obs, carry, extra, ep_stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, \n\t\tupdate_stats, ep_stats, \n\t\tued_update_stats, ued_ep_stats,\n\t\tenv_metrics=None,\n\t\tgrad_stats=None, ued_grad_stats=None):\n\t\tmean_returns_by_student = jax.vmap(lambda x: x.mean())(ep_stats['return'])\n\t\tmean_returns_by_teacher = jax.vmap(lambda x: x.mean())(ued_ep_stats['return'])\n\n\t\tmean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tued_mean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ued_ep_stats[k] for k in self.ued_rolling_stats.names}\n\t\t)\n\n\t\tstudent_stats = {\n\t\t\tf'mean_{k}':v for k,v in mean_ep_stats.items()\n\t\t}\n\t\tstudent_stats.update(update_stats)\n\n\t\tstats = {}\n\t\tfor i in range(self.n_students):\n\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], student_stats) # for agent0\n\t\t\tstats.update({f'{k}_a{i}':v for k,v in _student_stats.items()})\n\n\t\tteacher_stats = {\n\t\t\tf'mean_{k}_tch':v for k,v in ued_mean_ep_stats.items()\n\t\t}\n\t\tteacher_stats.update({\n\t\t\tf'{k}_tch':v[0] for k,v in ued_update_stats.items()\n\t\t})\n\t\tstats.update(teacher_stats)\n\n\t\tif self.track_env_metrics:\n\t\t\tpassable_mask = env_metrics.pop('passable')\n\t\t\tmean_env_metrics = jax.tree_util.tree_map(\n\t\t\t\tlambda x: (x*passable_mask).sum()/passable_mask.sum(), \n\t\t\t\tenv_metrics\n\t\t\t)\n\t\t\tmean_env_metrics.update({'passable_ratio': passable_mask.mean()})\n\t\t\tstats.update({\n\t\t\t\tf'env/{k}':v for k,v in mean_env_metrics.items()\n\t\t\t})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec[6] = P(None)\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate,\n\t\tobs,\n\t\tcarry,\n\t\tep_stats,\n\t\tued_train_state,\n\t\tued_state,\n\t\tued_obs,\n\t\tued_carry,\n\t\tued_ep_stats):\n\t\t\"\"\"\n\t\tPerform one update step: rollout teacher + students\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\t# === Reset teacher env + rollout teacher\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tued_reset_out = self.benv.reset_teacher(jnp.array(vrngs))\n\t\tif len(ued_reset_out) > 2:\n\t\t\tued_obs, ued_state, ued_extra = ued_reset_out\n\t\telse:\n\t\t\tued_obs, ued_state = ued_reset_out\n\t\t\tued_extra = None\n\n\t\t# Reset UED ep_stats\n\t\tif self.ued_rolling_stats is not None:\n\t\t\tued_ep_stats = 
self.ued_rolling_stats.reset_stats(\n\t\t\t\tbatch_shape=(1,self.n_parallel))\n\t\telse:\n\t\t\tued_ep_stats = None\n\n\t\ttch_rollout_batch_shape = (1,self.n_parallel*self.n_eval)\n\t\tdone = jnp.zeros(tch_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_rollout, ued_state, ued_obs, ued_carry, _, ued_ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng,\n\t\t\t\tself.teacher_pop,\n\t\t\t\tself.teacher_rollout,\n\t\t\t\tself.teacher_n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(ued_train_state.params), \n\t\t\t\tued_state, \n\t\t\t\tued_obs, \n\t\t\t\tued_carry,\n\t\t\t\tdone, \n\t\t\t\textra=ued_extra, \n\t\t\t\tep_stats=ued_ep_stats\n\t\t\t)\n\n\t\t# === Reset student to new envs + rollout students\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tobs, state, extra = jax.tree_util.tree_map(\n\t\t\tlambda x:x.squeeze(0), self.benv.reset_student(\n\t\t\t\tjnp.array(vrngs),\n\t\t\t\tued_state, \n\t\t\t\tself.student_pop.n_agents))\n\t\treset_state = state\n\n\t\t# Reset student ep_stats\n\t\tst_rollout_batch_shape = (self.n_students,self.n_parallel*self.n_eval)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=st_rollout_batch_shape)\n\n\t\tdone = jnp.zeros(st_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, obs, carry, extra, ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng, \n\t\t\t\tself.student_pop,\n\t\t\t\tself.student_rollout,\n\t\t\t\tself.n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(train_state.params),\n\t\t\t\tstate, \n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\treset_state=reset_state, \n\t\t\t\textra=extra, \n\t\t\t\tep_stats=ep_stats)\n\n\t\t# === Update student with PPO\n\t\t# PPOAgent vmaps over the train state and batch. 
Batch must be N x EM\n\t\tstudent_rollout_last_value = self.student_pop.get_value(\n\t\t\tjax.lax.stop_gradient(train_state.params), obs, carry\n\t\t)\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tstudent_rollout_last_value\n\t\t)\n\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# === Update teacher with PPO\n\t\t# - Compute returns per env per agent\n\t\t# - Compute batched returns based on returns per env per agent\n\t\tued_score, _ = compute_ued_scores(self.ued_score, train_batch, self.n_eval)\n\t\tued_rollout = self.teacher_rollout.set_final_reward(ued_rollout, ued_score)\n\t\tued_train_batch = self.teacher_rollout.get_batch(\n\t\t\tued_rollout, \n\t\t\tjnp.zeros((1, self.n_parallel)) # Last step terminates episode\n\t\t)\n\n\t\tued_ep_stats = self._update_ued_ep_stats(\n\t\t\tued_ep_stats, \n\t\t\tjnp.ones((1,len(ued_score),1), dtype=jnp.bool_),\n\t\t\t{'return': jnp.expand_dims(ued_score, (0,-1))}\n\t\t)\n\n\t\t# Update teacher, batch must be 1 x Ex1\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_train_state, ued_update_stats = self.teacher_pop.update(subrng, ued_train_state, ued_train_batch)\n\n\t\t# --------------------------------------------------\n\t\t# Collect metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(reset_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tgrad_stats, ued_grad_stats = None, None\n\n\t\tstats = self._compile_stats(\n\t\t\tupdate_stats, ep_stats, \n\t\t\tued_update_stats, ued_ep_stats,\n\t\t\tenv_metrics,\n\t\t\tgrad_stats, ued_grad_stats)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tued_train_state = ued_train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng,\n\t\t\ttrain_state, state, obs, carry, ep_stats,\n\t\t\tued_train_state, ued_state, ued_obs, ued_carry, ued_ep_stats\n\t\t)" }, { "identifier": "PLRRunner", "path": "src/minimax/runners/plr_runner.py", "snippet": "class PLRRunner(DRRunner):\n\tdef __init__(\n\t\tself, \n\t\t*,\n\t\treplay_prob=0.5,\n\t\tbuffer_size=100,\n\t\tstaleness_coef=0.3,\n\t\tuse_score_ranks=True,\n\t\ttemp=1.0,\n\t\tmin_fill_ratio=0.5,\n\t\tuse_robust_plr=False,\n\t\tuse_parallel_eval=False,\n\t\tued_score='l1_value_loss',\n\t\tforce_unique=False, # Slower if True\n\t\tmutation_fn=None,\n\t\tn_mutations=0,\n\t\tmutation_criterion='batch',\n\t\tmutation_subsample_size=1,\n\t\t**kwargs):\n\t\tuse_mutations = mutation_fn is not None\n\t\tif use_parallel_eval:\n\t\t\treplay_prob = 1.0 # Replay every rollout cycle\n\t\t\tmutation_criterion = 'batch' # Force batch mutations (no UED scores)\n\t\t\tself._n_parallel_batches = 3 if use_mutations else 2\n\t\t\tkwargs['n_parallel'] *= self._n_parallel_batches\n\n\t\tsuper().__init__(**kwargs)\n\n\t\tself.replay_prob = replay_prob\n\t\tself.buffer_size = buffer_size\n\t\tself.staleness_coef = staleness_coef\n\t\tself.temp = temp\n\t\tself.use_score_ranks = use_score_ranks\n\t\tself.min_fill_ratio = min_fill_ratio\n\t\tself.use_robust_plr = use_robust_plr\n\t\tself.use_parallel_eval = use_parallel_eval\n\t\tself.ued_score = UEDScore[ued_score.upper()]\n\n\t\tself.use_mutations = use_mutations\n\t\tif self.use_mutations:\n\t\t\tself.mutation_fn = envs.get_mutator(self.benv.env_name, mutation_fn)\n\t\telse:\n\t\t\tself.mutation_fn = None\n\t\tself.n_mutations = n_mutations\n\t\tself.mutation_criterion = 
MutationCriterion[mutation_criterion.upper()]\n\t\tself.mutation_subsample_size = mutation_subsample_size\n\n\t\tself.force_unique = force_unique\n\t\tif force_unique:\n\t\t\tself.comparator_fn = envs.get_comparator(self.benv.env_name)\n\t\telse:\n\t\t\tself.comparator_fn = None\n\n\t\tif mutation_fn is not None and mutation_criterion != 'batch':\n\t\t\tassert self.n_parallel % self.mutation_subsample_size == 0, \\\n\t\t\t\t'Number of parallel envs must be divisible by mutation subsample size.'\n\n\tdef reset(self, rng):\n\t\trunner_state = list(super().reset(rng))\n\t\trng = runner_state[0]\n\t\trunner_state[0], subrng = jax.random.split(rng)\n\t\texample_state = self.benv.env.reset(rng)[1]\n\n\t\tself.plr_mgr = PopPLRManager(\n\t\t\tn_agents=self.n_students,\n\t\t\texample_level=example_state,\n\t\t\tued_score=self.ued_score,\n\t\t\treplay_prob=self.replay_prob,\n\t\t\tbuffer_size=self.buffer_size,\n\t\t\tstaleness_coef=self.staleness_coef,\n\t\t\ttemp=self.temp,\n\t\t\tuse_score_ranks=self.use_score_ranks,\n\t\t\tmin_fill_ratio=self.min_fill_ratio,\n\t\t\tuse_robust_plr=self.use_robust_plr,\n\t\t\tuse_parallel_eval=self.use_parallel_eval,\n\t\t\tcomparator_fn=self.comparator_fn,\n\t\t\tn_devices=self.n_devices\n\t\t)\n\t\tplr_buffer = self.plr_mgr.reset(self.n_students)\n\n\t\ttrain_state = runner_state[1]\n\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\t\tif self.n_devices == 1:\n\t\t\trunner_state[1] = train_state\n\t\telse:\n\t\t\tplr_buffer = jax.tree_map(lambda x: x.repeat(self.n_devices, 1), plr_buffer) # replicate plr buffer\n\t\t\trunner_state += (plr_buffer,) # Return PLR buffer directly to make shmap easier\n\n\t\tself.dummy_eval_output = self._create_dummy_eval_output(train_state)\n\n\t\treturn tuple(runner_state)\n\n\tdef _create_dummy_eval_output(self, train_state):\n\t\trng, *vrngs = jax.random.split(jax.random.PRNGKey(0), self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, self.n_parallel*self.n_eval))\n\n\t\tued_scores = jnp.zeros((self.n_students, self.n_parallel))\n\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\t\trollout = self.student_rollout.reset()\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstate,\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tstate,\n\t\t\ttrain_batch,\n\t\t\tued_scores\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,8))\n\tdef _eval_and_update_plr(\n\t\t\tself,\n\t\t\trng,\n\t\t\tlevels,\n\t\t\tlevel_idxs, \n\t\t\ttrain_state, \n\t\t\tupdate_plr,\n\t\t\tparent_idxs=None,\n\t\t\tdupe_mask=None,\n\t\t\tfake=False):\n\t\t# Collect rollout and optionally update plr buffer\n\t\t# Returns train_batch and ued_scores\n\t\t# Perform rollout: @todo: pmap this\n\t\tif fake:\n\t\t\tdummy_eval_output = list(self.dummy_eval_output)\n\t\t\tdummy_eval_output[1] = train_state\n\t\t\treturn tuple(dummy_eval_output)\n\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\t\tobs, state, extra = self.benv.set_state(levels)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, 
dtype=jnp.bool_)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tstart_state = state\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry\n\t\t\t)\n\t\t)\n\n\t\t# Update PLR buffer\n\t\tif self.ued_score == UEDScore.MAX_MC:\n\t\t\tmax_returns = jax.vmap(lambda x,y: x.at[y].get())(train_state.plr_buffer.max_returns, level_idxs)\n\t\t\tmax_returns = jnp.where(\n\t\t\t\tjnp.greater_equal(level_idxs, 0),\n\t\t\t\tmax_returns,\n\t\t\t\tjnp.full_like(max_returns, -jnp.inf)\n\t\t\t)\n\t\t\tued_info = {'max_returns': max_returns}\n\t\telse:\n\t\t\tued_info = None\n\t\tued_scores, ued_score_info = compute_ued_scores(\n\t\t\tself.ued_score, train_batch, self.n_eval, info=ued_info, ignore_val=-jnp.inf, per_agent=True)\n\t\tnext_plr_buffer = self.plr_mgr.update(\n\t\t\ttrain_state.plr_buffer, \n\t\t\tlevels=levels, \n\t\t\tlevel_idxs=level_idxs, \n\t\t\tued_scores=ued_scores,\n\t\t\tdupe_mask=dupe_mask, \n\t\t\tinfo=ued_score_info, \n\t\t\tignore_val=-jnp.inf,\n\t\t\tparent_idxs=parent_idxs)\n\n\t\tnext_plr_buffer = jax.vmap(\n\t\t\tlambda update, new, prev: jax.tree_map(\n\t\t\t\tlambda x, y: jax.lax.select(update, x, y), new, prev)\n\t\t)(update_plr, next_plr_buffer, train_state.plr_buffer)\n\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\trollout_start_state,\n\t\t\ttrain_batch,\n\t\t\tued_scores,\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _mutate_levels(self, rng, levels, level_idxs, ued_scores=None):\n\t\tif not self.use_mutations:\n\t\t\treturn levels, level_idxs, jnp.full_like(level_idxs, -1)\n\n\t\tdef upsample_levels(levels, level_idxs, subsample_idxs):\n\t\t\tsubsample_idxs = subsample_idxs.repeat(self.n_parallel//self.mutation_subsample_size, -1)\n\t\t\tparent_idxs = level_idxs.take(subsample_idxs)\n\t\t\tlevels = jax.vmap(\n\t\t\t\tlambda x, y: jax.tree_map(lambda _x: jnp.array(_x).take(y, 0), x)\n\t\t\t)(levels, parent_idxs)\n\t\t\t\n\t\t\treturn levels, parent_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.BATCH:\n\t\t\tparent_idxs = level_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.EASY:\n\t\t\t_, top_level_idxs = jax.lax.approx_min_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\telif self.mutation_criterion == MutationCriterion.HARD:\n\t\t\t_, top_level_idxs = jax.lax.approx_max_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\tn_parallel = level_idxs.shape[-1]\n\t\tvrngs = jax.vmap(lambda subrng: jax.random.split(subrng, n_parallel))(\n\t\t\tjax.random.split(rng, self.n_students)\n\t\t)\n\n\t\tmutated_levels = jax.vmap(\n\t\t\tlambda *args: jax.vmap(self.mutation_fn, in_axes=(0,None,0,None))(*args),\n\t\t\tin_axes=(0,None,0,None)\n\t\t)(vrngs, self.benv.env_params, levels, 
self.n_mutations)\n\n\t\t# Mutated levels do not have existing idxs in the PLR buffer.\n\t\tmutated_level_idxs = jnp.full((self.n_students, n_parallel), -1)\n\n\t\treturn mutated_levels, mutated_level_idxs, parent_idxs\n\n\tdef _efficient_grad_update(self, rng, train_state, train_batch, is_replay):\n\t\t# PPOAgent vmaps over the train state and batch. Batch must be N x EM\n\t\tskip_grad_update = jnp.logical_and(self.use_robust_plr, ~is_replay)\n\n\t\tif self.n_students == 1:\n\t\t\ttrain_state, stats = jax.lax.cond(\n\t\t\t\tskip_grad_update[0],\n\t\t\t\tpartial(self.student_pop.update, fake=True),\n\t\t\t\tself.student_pop.update,\n\t\t\t\t*(rng, train_state, train_batch)\n\t\t\t)\n\t\telif self.n_students > 1: # Have to vmap all students + take only students that need updates\n\t\t\t_, dummy_stats = jax.vmap(lambda *_: self.student_pop.agent.get_empty_update_stats())(np.arange(self.n_students))\n\t\t\t_train_state, stats = self.student.update(rng, train_state, train_batch)\n\t\t\ttrain_state, stats = jax.vmap(lambda cond,x,y: \\\n\t\t\t\t\tjax.tree_map(lambda _cond,_x,_y: jax.lax.select(_cond,_x,_y), cond, x, y))(\n\t\t\t\t\t\tis_replay, (train_state, stats), (_train_state, dummy_stats)\n\t\t\t\t\t)\n\n\t\treturn train_state, stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None, plr_stats=None):\n\t\tstats = super()._compile_stats(update_stats, ep_stats, env_metrics)\n\n\t\tif plr_stats is not None:\n\t\t\tplr_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(plr_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_plr_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_plr_stats = jax.tree_util.tree_map(lambda x: x[i], plr_stats) # for agent0\n\t\t\t\t_plr_stats.update({f'{k}_a{i}':v for k,v in _student_plr_stats.items()})\n\t\t\tplr_stats = _plr_stats\n\t\telse:\n\t\t\tplr_stats = jax.tree_map(lambda x: x[0], plr_stats) \n\n\t\tstats.update({f'plr_{k}':v for k,v in plr_stats.items()})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None,\n\t\tplr_buffer=None):\n\t\t# If device sharded, load sharded PLR buffer into train state\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\t\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\n\t\t# Sample next training levels via PLR\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), self.n_parallel, 1)\n\n\t\tif self.use_parallel_eval:\n\t\t\tn_level_samples = self.n_parallel//self._n_parallel_batches\n\t\t\tnew_levels = jax.tree_map(lambda x: x.at[:,n_level_samples:2*n_level_samples].get(), state)\n\t\telse:\n\t\t\tn_level_samples = self.n_parallel\n\t\t\tnew_levels = state\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tlevels, level_idxs, is_replay, next_plr_buffer = \\\n\t\t\tself.plr_mgr.sample(subrng, train_state.plr_buffer, new_levels, n_level_samples)\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\t# If use_parallel_eval=True, need to combine replay and non-replay levels together\n\t\t# Need to mutate levels as well\n\t\tparent_idxs = jnp.full((self.n_students, self.n_parallel), -1)\n\t\tif self.use_parallel_eval: # Parallel 
ACCEL\n\t\t\tnew_level_idxs = jnp.full_like(parent_idxs, -1)\n\n\t\t\t_all_levels = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(state, levels)\n\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(new_level_idxs, level_idxs)\n\n\t\t\tif self.use_mutations:\n\t\t\t\trng, subrng = jax.random.split(rng)\n\t\t\t\tmutated_levels, mutated_level_idxs, _parent_idxs = self._mutate_levels(subrng, levels, level_idxs)\n\t\t\t\t\n\t\t\t\tfallback_levels = jax.tree_map(lambda x: x.at[:,-n_level_samples:].get(), state)\n\t\t\t\tfallback_level_idxs = jnp.full_like(mutated_level_idxs, -1)\n\n\t\t\t\tmutated_levels = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_levels, fallback_levels)\n\n\t\t\t\tmutated_level_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_level_idxs, fallback_level_idxs)\n\n\t\t\t\t_parent_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, _parent_idxs, fallback_level_idxs)\n\t\t\n\t\t\t\tmutated_levels_start_idx = 2*n_level_samples\n\t\t\t\t_all_levels = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_levels, mutated_levels)\n\t\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_level_idxs, mutated_level_idxs)\n\t\t\t\tparent_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(parent_idxs, _parent_idxs)\n\n\t\t\tlevels = _all_levels\n\t\t\tlevel_idxs = _all_level_idxs\n\n\t\t# dedupe levels, move into PLR buffer logic\n\t\tif self.force_unique:\n\t\t\tlevel_idxs, dupe_mask = self.plr_mgr.dedupe_levels(next_plr_buffer, levels, level_idxs)\n\t\telse:\n\t\t\tdupe_mask = None \n\n\t\t# Evaluate levels + update PLR\n\t\tresult = self._eval_and_update_plr(\n\t\t\trng, levels, level_idxs, train_state, update_plr=jnp.array([True]*self.n_students), parent_idxs=parent_idxs, dupe_mask=dupe_mask)\n\t\trng, train_state, state, start_state, obs, carry, extra, ep_stats, \\\n\t\t\trollout_start_state, train_batch, ued_scores = result\n\n\t\tif self.use_parallel_eval:\n\t\t\treplay_start_idx = self.n_eval*n_level_samples\n\t\t\treplay_end_idx = 2*replay_start_idx\n\t\t\ttrain_batch = jax.vmap(\n\t\t\t\tlambda x: jax.tree_map(\n\t\t\t\t\tlambda _x: _x.at[:,replay_start_idx:replay_end_idx].get(), x)\n\t\t\t\t)(train_batch)\n\n\t\t# Gradient update\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self._efficient_grad_update(subrng, train_state, train_batch, is_replay)\n\n\t\t# Mutation step\n\t\tuse_mutations = jnp.logical_and(self.use_mutations, is_replay)\n\t\tuse_mutations = jnp.logical_and(use_mutations, not self.use_parallel_eval) # Already mutated above in parallel\n\t\trng, arng, brng = jax.random.split(rng, 3)\n\n\t\tmutated_levels, mutated_level_idxs, parent_idxs = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._mutate_levels,\n\t\t\tlambda *_: (levels, level_idxs, jnp.full_like(level_idxs, 
-1)),\n\t\t\t*(arng, levels, level_idxs, ued_scores)\n\t\t)\n\n\t\tmutated_dupe_mask = jnp.zeros_like(mutated_level_idxs, dtype=jnp.bool_)\n\t\tif self.force_unique: # Should move into update plr logic\n\t\t\tmutated_level_idxs, mutated_dupe_mask = jax.lax.cond(\n\t\t\t\tuse_mutations.any(),\n\t\t\t\tself.plr_mgr.dedupe_levels,\n\t\t\t\tlambda *_: (mutated_level_idxs, mutated_dupe_mask),\n\t\t\t\t*(next_plr_buffer, mutated_levels, mutated_level_idxs)\n\t\t\t)\n\n\t\tmutation_eval_result = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._eval_and_update_plr,\n\t\t\tpartial(self._eval_and_update_plr, fake=True),\n\t\t\t*(brng, mutated_levels, mutated_level_idxs, train_state, use_mutations, parent_idxs, mutated_dupe_mask)\n\t\t)\n\t\ttrain_state = mutation_eval_result[1]\n\n\t\t# Collect training env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(levels)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tplr_stats = self.plr_mgr.get_metrics(train_state.plr_buffer)\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics, plr_stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tplr_buffer = train_state.plr_buffer\n\t\t\ttrain_state = train_state.replace(plr_buffer=None)\n\n\t\ttrain_state = train_state.increment()\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tplr_buffer\n\t\t)" } ]
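The runner snippets above collect rollouts by threading a tuple of (rollout, state, obs, carry, done, extra, ep_stats) through jax.lax.scan. The standalone sketch below illustrates that scan-carried rollout pattern only; it is not the minimax API, and the toy environment, stand-in policy, and shapes are assumptions made for illustration.

import jax
import jax.numpy as jnp
from functools import partial

def toy_step(rng, state, action):
    # Hypothetical stand-in for benv.step: reward equals the action and
    # episodes never terminate.
    next_state = state + action
    reward = action.astype(jnp.float32)
    done = jnp.zeros_like(action, dtype=jnp.bool_)
    return next_state, reward, done

@partial(jax.jit, static_argnums=(2,))
def toy_rollout(rng, state, n_steps):
    rngs = jax.random.split(rng, n_steps)

    def _scan_step(carry, step_rng):
        (state,) = carry
        # Stand-in policy: random binary actions, one per parallel env.
        action = jax.random.bernoulli(step_rng, 0.5, state.shape).astype(jnp.int32)
        next_state, reward, done = toy_step(step_rng, state, action)
        # Emitting (obs, action, reward, done) per step mirrors what
        # RolloutStorage.append stores in the snippets above.
        return (next_state,), (state, action, reward, done)

    (final_state,), transitions = jax.lax.scan(
        _scan_step, (state,), rngs, length=n_steps)
    return final_state, transitions

final_state, txns = toy_rollout(jax.random.PRNGKey(0), jnp.zeros((4,), jnp.int32), 8)
print(jax.tree_util.tree_map(lambda x: x.shape, txns))  # leading axis is n_steps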
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
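The _reset_pop method in the PAIREDRunner snippet above builds its optimizer by chaining optax.clip_by_global_norm, optax.scale_by_adam, and optax.scale_by_schedule over a linear schedule of negative step sizes (optax updates are added to the parameters, so negating the learning rate turns the chain into gradient descent). Below is a minimal, self-contained sketch of that pattern; the lr_init, lr_final, lr_anneal_steps, max_grad_norm, and adam_eps values are assumed placeholders, not values from the runner configuration.

import jax
import jax.numpy as jnp
import optax

lr_init, lr_final, lr_anneal_steps = 1e-4, 1e-5, 1000  # assumed values
max_grad_norm, adam_eps = 0.5, 1e-5                    # assumed values

schedule_fn = optax.linear_schedule(
    init_value=-float(lr_init),   # negative so apply_updates descends
    end_value=-float(lr_final),
    transition_steps=lr_anneal_steps,
)
tx = optax.chain(
    optax.clip_by_global_norm(max_grad_norm),
    optax.scale_by_adam(eps=adam_eps),
    optax.scale_by_schedule(schedule_fn),
)

params = {'w': jnp.ones(3)}
opt_state = tx.init(params)
grads = jax.grad(lambda p: jnp.sum(p['w'] ** 2))(params)
updates, opt_state = tx.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
print(params['w'])  # each entry steps slightly below 1.0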
17235
runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo( runner_cls=PAIREDRunner, is_ued=True ) } class ExperimentRunner: def __init__( self, train_runner, env_name, agent_rl_algo, student_model_name, teacher_model_name=None, train_runner_kwargs={}, env_kwargs={}, ued_env_kwargs={}, student_rl_kwargs={}, teacher_rl_kwargs={}, student_model_kwargs={}, teacher_model_kwargs={}, eval_kwargs={}, eval_env_kwargs={}, n_devices=1, ): self.env_name = env_name self.agent_rl_algo = agent_rl_algo self.is_ued = RUNNER_INFO[train_runner].is_ued dummy_env = envs.make( env_name, env_kwargs, ued_env_kwargs)[0] # ---- Make agent ---- student_model_kwargs['output_dim'] = dummy_env.action_space().n student_model = models.make( env_name=env_name, model_name=student_model_name, **student_model_kwargs ) student_agent = agents.PPOAgent( model=student_model, n_devices=n_devices, **student_rl_kwargs ) # ---- Handle UED-related settings ---- if self.is_ued: max_teacher_steps = dummy_env.ued_max_episode_steps() teacher_model_kwargs['n_scalar_embeddings'] = max_teacher_steps teacher_model_kwargs['max_scalar'] = max_teacher_steps teacher_model_kwargs['output_dim'] = dummy_env.ued_action_space().n teacher_model = models.make( env_name=env_name, model_name=teacher_model_name, **teacher_model_kwargs ) teacher_agent = agents.PPOAgent( model=teacher_model, n_devices=n_devices, **teacher_rl_kwargs ) train_runner_kwargs.update(dict( teacher_agents=[teacher_agent] )) train_runner_kwargs.update(dict( ued_env_kwargs=ued_env_kwargs )) # Debug, tabulate student and teacher model # import jax.numpy as jnp # dummy_rng = jax.random.PRNGKey(0) # obs, _ = dummy_env.reset(dummy_rng) # hx = student_model.initialize_carry(dummy_rng, (1,)) # ued_obs, _ = dummy_env.reset_teacher(dummy_rng) # ued_hx = teacher_model.initialize_carry(dummy_rng, (1,)) # obs['image'] = jnp.expand_dims(obs['image'], 0) # ued_obs['image'] = jnp.expand_dims(ued_obs['image'], 0) # print(student_model.tabulate(dummy_rng, obs, hx)) # print(teacher_model.tabulate(dummy_rng, ued_obs, hx)) # import pdb; pdb.set_trace() # ---- Set up train runner ---- runner_cls = RUNNER_INFO[train_runner].runner_cls # Set up learning rate annealing parameters lr_init = train_runner_kwargs.lr lr_final = train_runner_kwargs.lr_final lr_anneal_steps = train_runner_kwargs.lr_anneal_steps if lr_final is None: train_runner_kwargs.lr_final = lr_init if train_runner_kwargs.lr_final == train_runner_kwargs.lr: train_runner_kwargs.lr_anneal_steps = 0 self.runner = runner_cls( env_name=env_name, env_kwargs=env_kwargs, student_agents=[student_agent], n_devices=n_devices, **train_runner_kwargs) # ---- Make eval runner ---- if eval_kwargs.get('env_names') is None: self.eval_runner = None else:
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo( runner_cls=PAIREDRunner, is_ued=True ) } class ExperimentRunner: def __init__( self, train_runner, env_name, agent_rl_algo, student_model_name, teacher_model_name=None, train_runner_kwargs={}, env_kwargs={}, ued_env_kwargs={}, student_rl_kwargs={}, teacher_rl_kwargs={}, student_model_kwargs={}, teacher_model_kwargs={}, eval_kwargs={}, eval_env_kwargs={}, n_devices=1, ): self.env_name = env_name self.agent_rl_algo = agent_rl_algo self.is_ued = RUNNER_INFO[train_runner].is_ued dummy_env = envs.make( env_name, env_kwargs, ued_env_kwargs)[0] # ---- Make agent ---- student_model_kwargs['output_dim'] = dummy_env.action_space().n student_model = models.make( env_name=env_name, model_name=student_model_name, **student_model_kwargs ) student_agent = agents.PPOAgent( model=student_model, n_devices=n_devices, **student_rl_kwargs ) # ---- Handle UED-related settings ---- if self.is_ued: max_teacher_steps = dummy_env.ued_max_episode_steps() teacher_model_kwargs['n_scalar_embeddings'] = max_teacher_steps teacher_model_kwargs['max_scalar'] = max_teacher_steps teacher_model_kwargs['output_dim'] = dummy_env.ued_action_space().n teacher_model = models.make( env_name=env_name, model_name=teacher_model_name, **teacher_model_kwargs ) teacher_agent = agents.PPOAgent( model=teacher_model, n_devices=n_devices, **teacher_rl_kwargs ) train_runner_kwargs.update(dict( teacher_agents=[teacher_agent] )) train_runner_kwargs.update(dict( ued_env_kwargs=ued_env_kwargs )) # Debug, tabulate student and teacher model # import jax.numpy as jnp # dummy_rng = jax.random.PRNGKey(0) # obs, _ = dummy_env.reset(dummy_rng) # hx = student_model.initialize_carry(dummy_rng, (1,)) # ued_obs, _ = dummy_env.reset_teacher(dummy_rng) # ued_hx = teacher_model.initialize_carry(dummy_rng, (1,)) # obs['image'] = jnp.expand_dims(obs['image'], 0) # ued_obs['image'] = jnp.expand_dims(ued_obs['image'], 0) # print(student_model.tabulate(dummy_rng, obs, hx)) # print(teacher_model.tabulate(dummy_rng, ued_obs, hx)) # import pdb; pdb.set_trace() # ---- Set up train runner ---- runner_cls = RUNNER_INFO[train_runner].runner_cls # Set up learning rate annealing parameters lr_init = train_runner_kwargs.lr lr_final = train_runner_kwargs.lr_final lr_anneal_steps = train_runner_kwargs.lr_anneal_steps if lr_final is None: train_runner_kwargs.lr_final = lr_init if train_runner_kwargs.lr_final == train_runner_kwargs.lr: train_runner_kwargs.lr_anneal_steps = 0 self.runner = runner_cls( env_name=env_name, env_kwargs=env_kwargs, student_agents=[student_agent], n_devices=n_devices, **train_runner_kwargs) # ---- Make eval runner ---- if eval_kwargs.get('env_names') is None: self.eval_runner = None else:
self.eval_runner = EvalRunner(
0
2023-10-28 12:12:01+00:00
24k
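The runners in the record above expose get_shmap_spec to shard their state over a 'device' mesh axis and average statistics with jax.lax.pmean inside _compile_stats. The toy example below shows that Mesh / shard_map / pmean combination in isolation; the mesh size, input shape, and reduction function are placeholders rather than anything taken from minimax.

import jax
import jax.numpy as jnp
from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map

n_devices = jax.local_device_count()
mesh = Mesh(mesh_utils.create_device_mesh((n_devices,)), ('device',))

def sharded_mean(x):
    # Each device averages its shard, then pmean over the 'device' axis
    # combines the per-shard means, as in the _compile_stats methods above.
    return jax.lax.pmean(x.mean(), 'device')

f = shard_map(sharded_mean, mesh, in_specs=P('device'), out_specs=P())
x = jnp.arange(4.0 * n_devices)
print(f(x))  # global mean of x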
nv-tlabs/vid2player3d
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\"\n )\n\n def __init__(self, node_names, parent_indices, local_translation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n def __repr__(self):\n return (\n \"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n )\n )\n\n def 
_indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args, **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args, **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict(\n [\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n ]\n )\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n new_local_translation = torch.zeros(\n new_length, 3, dtype=self.local_translation.dtype\n )\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n 
while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[\n tb_node_index, node_index, :\n ]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[\n self[tb_node_index]\n ]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... )\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. 
from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., : self.num_joints * 4].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 4))\n )\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? \n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[\n ..., self.num_joints * 4 : self.num_joints * 4 + 3\n ]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(\n local_transformation[..., node_index, :]\n )\n else:\n global_transformation.append(\n transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n )\n )\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not 
hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(\n self.global_transformation\n )\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[\n ..., node_index, :\n ]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(\n r=self.local_rotation, t=self.local_translation\n )\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. 
The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (\n tuple(self.tensor.shape[:-1])\n + (len(self.skeleton_tree),)\n + tuple(self.skeleton_tree.local_translation.shape[-1:])\n )\n local_translation = self.skeleton_tree.local_translation.broadcast_to(\n *broadcast_shape\n ).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (\n self.global_translation - self.root_translation.unsqueeze(-1)\n )\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (\n quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation\n )\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (\n global_positions[:, left_shoulder_index].numpy()\n - global_positions[:, right_shoulder_index].numpy()\n + global_positions[:, left_hip_index].numpy()\n - global_positions[:, right_hip_index].numpy()\n )\n side_direction = (\n side_direction\n / np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(\n forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\"\n )\n forward_direction = (\n forward_direction\n / np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(\n *(state_shape + (-1,))\n )\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonState\":\n rot = 
TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ]\n )\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (\n r.dim() > 0\n ), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n return cls(\n SkeletonState._to_state_vector(r, t),\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
\n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (\n transform_translation(transform_mul(p1, p2))\n .reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)\n .mean(axis=0)\n )\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(\n node_names, pairwise_translation\n )\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(\n self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree\n ):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(\n list(joint_mapping_inv)\n )\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (\n len(set(n_joints)) == 1\n ), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(\n map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n )\n )\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(\n node_names, pairwise_translation\n )\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]\n )\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]\n )\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (\n source_state.root_translation - source_tpose.root_translation\n ) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state 
relative to source tpose and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[\n current_index, :\n ] = target_tpose.global_rotation[\n target_tpose.skeleton_tree.index(name), :\n ]\n\n global_rotation_diff = quat_mul_norm(\n source_state.global_rotation, quat_inverse(source_tpose.global_rotation)\n )\n new_global_rotation = quat_mul_norm(\n global_rotation_diff, target_tpose_global_rotation\n )\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[\n :, parent_index, :\n ]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (\n len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0\n ), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(\n self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps\n )\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(\n cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int\n ):\n \"\"\"\n Construct a skeleton motion from a 
skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n assert (\n type(skeleton_state) == SkeletonState\n ), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n global_velocity = SkeletonMotion._compute_velocity(\n p=skeleton_state.global_translation, time_delta=1 / fps\n )\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(\n r=skeleton_state.global_rotation, time_delta=1 / fps\n )\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(\n dict_repr[\"global_angular_velocity\"], *args, **kwargs\n )\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(\n dict_repr[\"skeleton_tree\"], *args, **kwargs\n ),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ]\n )\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(\n fbx_file_path, fbx_configs, root_joint, fps\n )\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(\n transformation_matrix=torch.from_numpy(\n np.swapaxes(np.array(transforms), -1, -2),\n ).float()\n )\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(\n -1, len(joint_parents), 3\n )[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree, r=local_rotation, t=root_translation, is_local=True\n )\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(\n skeleton_state=skeleton_state, fps=fps\n )\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode=\"nearest\"\n )\n / time_delta,\n )\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(\n r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])\n )\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n angular_velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"\n ),\n )\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\n \"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps)\n )\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "plot_skeleton_state", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_state(skeleton_state, task_name=\"\"):\n \"\"\"\n Visualize a skeleton state\n\n :param skeleton_state:\n :param task_name:\n :type skeleton_state: SkeletonState\n :type task_name: string, optional\n \"\"\"\n logger.info(\"plotting {}\".format(task_name))\n task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)\n plotter = Matplotlib3DPlotter(task)\n plotter.show()" }, { "identifier": "plot_skeleton_motion_interactive", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_motion_interactive(skeleton_motion, task_name=\"\"):\n \"\"\"\n Visualize a skeleton motion along its first dimension interactively.\n\n :param skeleton_motion:\n :param task_name:\n :type skeleton_motion: SkeletonMotion\n :type task_name: string, optional\n \"\"\"\n for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):\n pass" }, { "identifier": "Matplotlib3DPlotter", "path": "poselib/poselib/visualization/plt_plotter.py", "snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n 
_create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], 
dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)" }, { "identifier": "Draw3DSkeletonMotion", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonMotion(BasePlotterTask):\n def __init__(\n self,\n task_name: str,\n skeleton_motion,\n frame_index=None,\n joints_color=\"red\",\n lines_color=\"blue\",\n velocity_color=\"green\",\n angular_velocity_color=\"purple\",\n trail_color=\"black\",\n trail_length=10,\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonMotion\")\n self._trail_length = trail_length\n self._skeleton_motion = skeleton_motion\n # if frame_index is None:\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]\n # else:\n # curr_skeleton_motion = self._skeleton_motion[frame_index, :]\n self._skeleton_state_task = Draw3DSkeletonState(\n self.get_scoped_name(\"skeleton_state\"),\n curr_skeleton_motion,\n joints_color=joints_color,\n lines_color=lines_color,\n alpha=alpha,\n )\n vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(\n curr_skeleton_motion\n )\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(trail_length, axis=0)\n self._vel_task = Draw3DLines(\n self.get_scoped_name(\"velocity\"),\n vel_lines,\n velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._avel_task = Draw3DLines(\n self.get_scoped_name(\"angular_velocity\"),\n avel_lines,\n angular_velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._com_trail_task = Draw3DTrail(\n self.get_scoped_name(\"com_trail\"),\n self._com_pos,\n trail_color,\n marker_size=2,\n influence_lim=True,\n alpha=alpha,\n )\n\n @property\n def name(self):\n return \"3DSkeletonMotion\"\n\n def update(self, 
frame_index=None, reset_trail=False, skeleton_motion=None) -> None:\n if skeleton_motion is not None:\n self._skeleton_motion = skeleton_motion\n\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]\n if reset_trail:\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(self._trail_length, axis=0)\n else:\n self._com_pos = np.concatenate(\n (\n curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],\n self._com_pos[:-1],\n ),\n axis=0,\n )\n self._skeleton_state_task.update(curr_skeleton_motion)\n self._com_trail_task.update(self._com_pos)\n self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))\n\n @staticmethod\n def _get_vel_and_avel(skeleton_motion):\n \"\"\"Get all the velocity and angular velocity lines\n \"\"\"\n pos = skeleton_motion.global_translation.numpy()\n vel = skeleton_motion.global_velocity.numpy()\n avel = skeleton_motion.global_angular_velocity.numpy()\n\n vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)\n avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)\n return vel_lines, avel_lines\n\n def _update(self, vel_lines, avel_lines) -> None:\n self._vel_task.update(vel_lines)\n self._avel_task.update(avel_lines)\n\n def __iter__(self):\n yield from self._skeleton_state_task\n yield from self._vel_task\n yield from self._avel_task\n yield from self._com_trail_task" }, { "identifier": "Draw3DSkeletonState", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonState(BasePlotterTask):\n _lines_task: Draw3DLines # sub-task for drawing lines\n _dots_task: Draw3DDots # sub-task for drawing dots\n\n def __init__(\n self,\n task_name: str,\n skeleton_state,\n joints_color: str = \"red\",\n lines_color: str = \"blue\",\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonState\")\n lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)\n self._lines_task = Draw3DLines(\n self.get_scoped_name(\"bodies\"), lines, joints_color, alpha=alpha\n )\n self._dots_task = Draw3DDots(\n self.get_scoped_name(\"joints\"), dots, lines_color, alpha=alpha\n )\n\n @property\n def name(self):\n return \"3DSkeleton\"\n\n def update(self, skeleton_state) -> None:\n self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))\n\n @staticmethod\n def _get_lines_and_dots(skeleton_state):\n \"\"\"Get all the lines and dots needed to draw the skeleton state\n \"\"\"\n assert (\n len(skeleton_state.tensor.shape) == 1\n ), \"the state has to be zero dimensional\"\n dots = skeleton_state.global_translation.numpy()\n skeleton_tree = skeleton_state.skeleton_tree\n parent_indices = skeleton_tree.parent_indices.numpy()\n lines = []\n for node_index in range(len(skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index != -1:\n lines.append([dots[node_index], dots[parent_index]])\n lines = np.array(lines)\n return lines, dots\n\n def _update(self, lines, dots) -> None:\n self._lines_task.update(lines)\n self._dots_task.update(dots)\n\n def __iter__(self):\n yield from self._lines_task\n yield from self._dots_task" } ]
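The snippets quoted above document the poselib SkeletonState / SkeletonMotion API (zero_pose, from_rotation_and_root_translation, global_repr, from_skeleton_state). The following is a minimal illustrative sketch of how those calls fit together; it is not part of the dataset record, and the two-joint toy skeleton plus the absolute import path are assumptions (the record itself uses relative imports).

# Illustrative sketch only. Assumptions: the import path mirrors the file paths
# listed in the context entries, and the two-joint skeleton is a toy stand-in.
import torch
from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion

# Build a tiny skeleton tree: a root and one child offset 1 m along z
# (constructor usage mirrors the from_fbx snippet above).
node_names = ["root", "child"]
parent_indices = torch.tensor([-1, 0], dtype=torch.int32)
local_translation = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
skel_tree = SkeletonTree(node_names, parent_indices, local_translation)

# Zero pose: identity local rotations, zero root translation (4 * num_joints + 3 values).
zero_pose = SkeletonState.zero_pose(skel_tree)
print(zero_pose.is_local, zero_pose.tensor.shape)   # True, torch.Size([11])

# Same pose, converted to the global representation.
print(zero_pose.global_repr().is_local)             # False

# Stack T copies of the rest pose and derive velocities as a SkeletonMotion.
T = 8
r = zero_pose.local_rotation.unsqueeze(0).repeat(T, 1, 1)   # (T, num_joints, 4)
t = zero_pose.root_translation.unsqueeze(0).repeat(T, 1)    # (T, 3)
states = SkeletonState.from_rotation_and_root_translation(skel_tree, r=r, t=t, is_local=True)
motion = SkeletonMotion.from_skeleton_state(states, fps=30)
print(motion.global_velocity.shape)                 # (T, num_joints, 3)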
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
17,914
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices)
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices)
skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
1
2023-10-30 20:43:43+00:00
24k
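For illustration only (a reconstruction, not an additional field of the record): the task this record encodes is to predict next_line given the context snippets, the import statement, and cropped_code. Splicing the gold next line onto the (abridged) cropped test gives roughly the following; the absolute import is an assumption standing in for the record's relative imports, and the MJCF path is machine-specific and taken verbatim from the record.

# Reconstruction for illustration: abridged cropped_code followed by the gold next_line.
from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState  # assumed import path

def test_skel_tree():
    skel_tree = SkeletonTree.from_mjcf(
        "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
        backend="pytorch",
    )
    print(skel_tree.node_names)
    print(skel_tree.local_translation)
    print(skel_tree.parent_indices)
    # Gold next_line for this record (the original file continues from here):
    skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)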
masked-spacetime-hashing/msth
MSTH/video_pipeline.py
[ { "identifier": "base_config", "path": "nerfstudio/configs/base_config.py", "snippet": "class PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(InstantiateConfig):\nclass LoggingConfig(PrintableConfig):\nclass ViewerConfig(PrintableConfig):\n def __str__(self):\n def setup(self, **kwargs) -> Any:\n def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:" }, { "identifier": "DataManager", "path": "nerfstudio/data/datamanagers/base_datamanager.py", "snippet": "class DataManager(nn.Module):\n \"\"\"Generic data manager's abstract class\n\n This version of the data manager is designed be a monolithic way to load data and latents,\n especially since this may contain learnable parameters which need to be shared across the train\n and test data managers. The idea is that we have setup methods for train and eval separately and\n this can be a combined train/eval if you want.\n\n Usage:\n To get data, use the next_train and next_eval functions.\n This data manager's next_train and next_eval methods will return 2 things:\n 1. A Raybundle: This will contain the rays we are sampling, with latents and\n conditionals attached (everything needed at inference)\n 2. A \"batch\" of auxiliary information: This will contain the mask, the ground truth\n pixels, etc needed to actually train, score, etc the model\n\n Rationale:\n Because of this abstraction we've added, we can support more NeRF paradigms beyond the\n vanilla nerf paradigm of single-scene, fixed-images, no-learnt-latents.\n We can now support variable scenes, variable number of images, and arbitrary latents.\n\n\n Train Methods:\n setup_train: sets up for being used as train\n iter_train: will be called on __iter__() for the train iterator\n next_train: will be called on __next__() for the training iterator\n get_train_iterable: utility that gets a clean pythonic iterator for your training data\n\n Eval Methods:\n setup_eval: sets up for being used as eval\n iter_eval: will be called on __iter__() for the eval iterator\n next_eval: will be called on __next__() for the eval iterator\n get_eval_iterable: utility that gets a clean pythonic iterator for your eval data\n\n\n Attributes:\n train_count (int): the step number of our train iteration, needs to be incremented manually\n eval_count (int): the step number of our eval iteration, needs to be incremented manually\n train_dataset (Dataset): the dataset for the train dataset\n eval_dataset (Dataset): the dataset for the eval dataset\n\n Additional attributes specific to each subclass are defined in the setup_train and setup_eval\n functions.\n\n \"\"\"\n\n train_dataset: Optional[Dataset] = None\n eval_dataset: Optional[Dataset] = None\n train_sampler: Optional[DistributedSampler] = None\n eval_sampler: Optional[DistributedSampler] = None\n\n def __init__(self):\n \"\"\"Constructor for the DataManager class.\n\n Subclassed DataManagers will likely need to override this constructor.\n\n If you aren't manually calling the setup_train and setup_eval functions from an overriden\n constructor, that you call super().__init__() BEFORE you initialize any\n nn.Modules or nn.Parameters, but AFTER you've already set all the attributes you need\n for the setup functions.\"\"\"\n super().__init__()\n self.train_count = 0\n self.eval_count = 0\n if self.train_dataset and self.test_mode != \"inference\":\n self.setup_train()\n if 
self.eval_dataset and self.test_mode != \"inference\":\n self.setup_eval()\n\n def forward(self):\n \"\"\"Blank forward method\n\n This is an nn.Module, and so requires a forward() method normally, although in our case\n we do not need a forward() method\"\"\"\n raise NotImplementedError\n\n def iter_train(self):\n \"\"\"The __iter__ function for the train iterator.\n\n This only exists to assist the get_train_iterable function, since we need to pass\n in an __iter__ function for our trivial iterable that we are making.\"\"\"\n self.train_count = 0\n\n def iter_eval(self):\n \"\"\"The __iter__ function for the eval iterator.\n\n This only exists to assist the get_eval_iterable function, since we need to pass\n in an __iter__ function for our trivial iterable that we are making.\"\"\"\n self.eval_count = 0\n\n def get_train_iterable(self, length=-1) -> IterableWrapper:\n \"\"\"Gets a trivial pythonic iterator that will use the iter_train and next_train functions\n as __iter__ and __next__ methods respectively.\n\n This basically is just a little utility if you want to do something like:\n | for ray_bundle, batch in datamanager.get_train_iterable():\n | <eval code here>\n since the returned IterableWrapper is just an iterator with the __iter__ and __next__\n methods (methods bound to our DataManager instance in this case) specified in the constructor.\n \"\"\"\n return IterableWrapper(self.iter_train, self.next_train, length)\n\n def get_eval_iterable(self, length=-1) -> IterableWrapper:\n \"\"\"Gets a trivial pythonic iterator that will use the iter_eval and next_eval functions\n as __iter__ and __next__ methods respectively.\n\n This basically is just a little utility if you want to do something like:\n | for ray_bundle, batch in datamanager.get_eval_iterable():\n | <eval code here>\n since the returned IterableWrapper is just an iterator with the __iter__ and __next__\n methods (methods bound to our DataManager instance in this case) specified in the constructor.\n \"\"\"\n return IterableWrapper(self.iter_eval, self.next_eval, length)\n\n @abstractmethod\n def setup_train(self):\n \"\"\"Sets up the data manager for training.\n\n Here you will define any subclass specific object attributes from the attribute\"\"\"\n\n @abstractmethod\n def setup_eval(self):\n \"\"\"Sets up the data manager for evaluation\"\"\"\n\n @abstractmethod\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the train data manager.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the ray bundle for the image, and a dictionary of additional batch information\n such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def next_eval(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the eval data manager.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the ray bundle for the image, and a dictionary of additional batch information\n such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]:\n \"\"\"Retreive the next eval image.\n\n Args:\n step: the step number of the eval image to retrieve\n Returns:\n A tuple of the step number, the ray bundle for the image, and a dictionary of\n additional batch information such as the groudtruth image.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def 
get_train_rays_per_batch(self) -> int:\n \"\"\"Returns the number of rays per batch for training.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_eval_rays_per_batch(self) -> int:\n \"\"\"Returns the number of rays per batch for evaluation.\"\"\"\n raise NotImplementedError\n\n def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use\n \"\"\"Returns the path to the data. This is used to determine where to save camera paths.\"\"\"\n return None\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks to be used during training.\"\"\"\n return []\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use\n \"\"\"Get the param groups for the data manager.\n\n Returns:\n A list of dictionaries containing the data manager's param groups.\n \"\"\"\n return {}" }, { "identifier": "DataManagerConfig", "path": "nerfstudio/data/datamanagers/base_datamanager.py", "snippet": "class DataManagerConfig(InstantiateConfig):\n \"\"\"Configuration for data manager instantiation; DataManager is in charge of keeping the train/eval dataparsers;\n After instantiation, data manager holds both train/eval datasets and is in charge of returning unpacked\n train/eval data at each iteration\n \"\"\"\n\n _target: Type = field(default_factory=lambda: DataManager)\n \"\"\"Target class to instantiate.\"\"\"\n data: Optional[Path] = None\n \"\"\"Source of data, may not be used by all models.\"\"\"\n camera_optimizer: Optional[CameraOptimizerConfig] = None\n \"\"\"Specifies the camera pose optimizer used during training. Helpful if poses are noisy.\"\"\"" }, { "identifier": "VanillaDataManager", "path": "nerfstudio/data/datamanagers/base_datamanager.py", "snippet": "class VanillaDataManager(DataManager): # pylint: disable=abstract-method\n \"\"\"Basic stored data manager implementation.\n\n This is pretty much a port over from our old dataloading utilities, and is a little jank\n under the hood. 
We may clean this up a little bit under the hood with more standard dataloading\n components that can be strung together, but it can be just used as a black box for now since\n only the constructor is likely to change in the future, or maybe passing in step number to the\n next_train and next_eval functions.\n\n Args:\n config: the DataManagerConfig used to instantiate class\n \"\"\"\n\n config: VanillaDataManagerConfig\n train_dataset: InputDataset\n eval_dataset: InputDataset\n train_dataparser_outputs: DataparserOutputs\n train_pixel_sampler: Optional[PixelSampler] = None\n eval_pixel_sampler: Optional[PixelSampler] = None\n\n def __init__(\n self,\n config: VanillaDataManagerConfig,\n device: Union[torch.device, str] = \"cpu\",\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n **kwargs, # pylint: disable=unused-argument\n ):\n self.config = config\n self.device = device\n self.world_size = world_size\n self.local_rank = local_rank\n self.sampler = None\n self.test_mode = test_mode\n self.test_split = \"test\" if test_mode in [\"test\", \"inference\"] else \"val\"\n self.dataparser_config = self.config.dataparser\n if self.config.data is not None:\n self.config.dataparser.data = Path(self.config.data)\n else:\n self.config.data = self.config.dataparser.data\n self.dataparser = self.dataparser_config.setup()\n self.train_dataparser_outputs = self.dataparser.get_dataparser_outputs(split=\"train\")\n\n self.train_dataset = self.create_train_dataset()\n self.eval_dataset = self.create_eval_dataset()\n super().__init__()\n\n def create_train_dataset(self) -> InputDataset:\n \"\"\"Sets up the data loaders for training\"\"\"\n return InputDataset(\n dataparser_outputs=self.train_dataparser_outputs,\n scale_factor=self.config.camera_res_scale_factor,\n )\n\n def create_eval_dataset(self) -> InputDataset:\n \"\"\"Sets up the data loaders for evaluation\"\"\"\n return InputDataset(\n dataparser_outputs=self.dataparser.get_dataparser_outputs(split=self.test_split),\n scale_factor=self.config.camera_res_scale_factor,\n )\n\n def _get_pixel_sampler( # pylint: disable=no-self-use\n self, dataset: InputDataset, *args: Any, **kwargs: Any\n ) -> PixelSampler:\n \"\"\"Infer pixel sampler to use.\"\"\"\n if self.config.patch_size > 1:\n return PatchPixelSampler(*args, **kwargs, patch_size=self.config.patch_size)\n\n # If all images are equirectangular, use equirectangular pixel sampler\n is_equirectangular = dataset.cameras.camera_type == CameraType.EQUIRECTANGULAR.value\n if is_equirectangular.all():\n return EquirectangularPixelSampler(*args, **kwargs)\n # Otherwise, use the default pixel sampler\n if is_equirectangular.any():\n CONSOLE.print(\"[bold yellow]Warning: Some cameras are equirectangular, but using default pixel sampler.\")\n return PixelSampler(*args, **kwargs)\n\n def setup_train(self):\n \"\"\"Sets up the data loaders for training\"\"\"\n assert self.train_dataset is not None\n CONSOLE.print(\"Setting up training dataset...\")\n self.train_image_dataloader = CacheDataloader(\n self.train_dataset,\n num_images_to_sample_from=self.config.train_num_images_to_sample_from,\n num_times_to_repeat_images=self.config.train_num_times_to_repeat_images,\n device=self.device,\n num_workers=self.world_size * 4,\n pin_memory=True,\n collate_fn=self.config.collate_fn,\n )\n self.iter_train_image_dataloader = iter(self.train_image_dataloader)\n self.train_pixel_sampler = self._get_pixel_sampler(self.train_dataset, 
self.config.train_num_rays_per_batch)\n self.train_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.train_dataset.cameras.size, device=self.device\n )\n self.train_ray_generator = RayGenerator(\n self.train_dataset.cameras.to(self.device),\n self.train_camera_optimizer,\n )\n\n def setup_eval(self):\n \"\"\"Sets up the data loader for evaluation\"\"\"\n assert self.eval_dataset is not None\n CONSOLE.print(\"Setting up evaluation dataset...\")\n self.eval_image_dataloader = CacheDataloader(\n self.eval_dataset,\n num_images_to_sample_from=self.config.eval_num_images_to_sample_from,\n num_times_to_repeat_images=self.config.eval_num_times_to_repeat_images,\n device=self.device,\n num_workers=self.world_size * 4,\n pin_memory=True,\n collate_fn=self.config.collate_fn,\n )\n self.iter_eval_image_dataloader = iter(self.eval_image_dataloader)\n self.eval_pixel_sampler = self._get_pixel_sampler(self.eval_dataset, self.config.eval_num_rays_per_batch)\n self.eval_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.eval_dataset.cameras.size, device=self.device\n )\n self.eval_ray_generator = RayGenerator(\n self.eval_dataset.cameras.to(self.device),\n self.eval_camera_optimizer,\n )\n # for loading full images\n self.fixed_indices_eval_dataloader = FixedIndicesEvalDataloader(\n input_dataset=self.eval_dataset,\n device=self.device,\n num_workers=self.world_size * 4,\n )\n self.eval_dataloader = RandIndicesEvalDataloader(\n input_dataset=self.eval_dataset,\n device=self.device,\n num_workers=self.world_size * 4,\n )\n\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the train dataloader.\"\"\"\n self.train_count += 1\n image_batch = next(self.iter_train_image_dataloader)\n assert self.train_pixel_sampler is not None\n batch = self.train_pixel_sampler.sample(image_batch)\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Returns the next batch of data from the eval dataloader.\"\"\"\n self.eval_count += 1\n image_batch = next(self.iter_eval_image_dataloader)\n assert self.eval_pixel_sampler is not None\n batch = self.eval_pixel_sampler.sample(image_batch)\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n return ray_bundle, batch\n\n def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]:\n for camera_ray_bundle, batch in self.eval_dataloader:\n assert camera_ray_bundle.camera_indices is not None\n image_idx = int(camera_ray_bundle.camera_indices[0, 0, 0])\n return image_idx, camera_ray_bundle, batch\n raise ValueError(\"No more eval images\")\n\n def get_train_rays_per_batch(self) -> int:\n return self.config.train_num_rays_per_batch\n\n def get_eval_rays_per_batch(self) -> int:\n return self.config.eval_num_rays_per_batch\n\n def get_datapath(self) -> Path:\n return self.config.dataparser.data\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use\n \"\"\"Get the param groups for the data manager.\n Returns:\n A list of dictionaries containing the data manager's param groups.\n \"\"\"\n param_groups = {}\n\n camera_opt_params = list(self.train_camera_optimizer.parameters())\n if self.config.camera_optimizer.mode != \"off\":\n assert len(camera_opt_params) > 0\n param_groups[self.config.camera_optimizer.param_group] = camera_opt_params\n else:\n assert len(camera_opt_params) == 
0\n\n return param_groups" }, { "identifier": "VanillaDataManagerConfig", "path": "nerfstudio/data/datamanagers/base_datamanager.py", "snippet": "class VanillaDataManagerConfig(DataManagerConfig):\n \"\"\"A basic data manager\"\"\"\n\n _target: Type = field(default_factory=lambda: VanillaDataManager)\n \"\"\"Target class to instantiate.\"\"\"\n dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig()\n \"\"\"Specifies the dataparser used to unpack the data.\"\"\"\n train_num_rays_per_batch: int = 1024\n \"\"\"Number of rays per batch to use per training iteration.\"\"\"\n train_num_images_to_sample_from: int = -1\n \"\"\"Number of images to sample during training iteration.\"\"\"\n train_num_times_to_repeat_images: int = -1\n \"\"\"When not training on all images, number of iterations before picking new\n images. If -1, never pick new images.\"\"\"\n eval_num_rays_per_batch: int = 1024\n \"\"\"Number of rays per batch to use per eval iteration.\"\"\"\n eval_num_images_to_sample_from: int = -1\n \"\"\"Number of images to sample during eval iteration.\"\"\"\n eval_num_times_to_repeat_images: int = -1\n \"\"\"When not evaluating on all images, number of iterations before picking\n new images. If -1, never pick new images.\"\"\"\n eval_image_indices: Optional[Tuple[int, ...]] = (0,)\n \"\"\"Specifies the image indices to use during eval; if None, uses all.\"\"\"\n camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig()\n \"\"\"Specifies the camera pose optimizer used during training. Helpful if poses are noisy, such as for data from\n Record3D.\"\"\"\n collate_fn = staticmethod(nerfstudio_collate)\n \"\"\"Specifies the collate function to use for the train and eval dataloaders.\"\"\"\n camera_res_scale_factor: float = 1.0\n \"\"\"The scale factor for scaling spatial data such as images, mask, semantics\n along with relevant information about camera intrinsics\n \"\"\"\n patch_size: int = 1\n \"\"\"Size of patch to sample from. If >1, patch-based sampling will be used.\"\"\"" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. 
The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. 
This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.render_aabb = None # the box that we want to render - should be a subset of scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n with Timer(\"forwarding\"):\n _t1 = time.time()\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n print(f\"forwarding took {time.time() - _t1} seconds\")\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n if output_name == \"mask_val\":\n outputs[\"mask_val\"] = torch.cat(outputs_list, dim=0)\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n\n def update_to_step(self, step: int) -> None:\n \"\"\"Called when loading a model from a checkpoint. 
Sets any model parameters that change over\n training to the correct value, based on the training step of the checkpoint.\n\n Args:\n step: training step of the loaded checkpoint\n \"\"\"" }, { "identifier": "ModelConfig", "path": "nerfstudio/models/base_model.py", "snippet": "class ModelConfig(InstantiateConfig):\n \"\"\"Configuration for model instantiation\"\"\"\n\n _target: Type = field(default_factory=lambda: Model)\n \"\"\"target class to instantiate\"\"\"\n enable_collider: bool = True\n \"\"\"Whether to create a scene collider to filter rays.\"\"\"\n collider_params: Optional[Dict[str, float]] = to_immutable_dict({\"near_plane\": 2.0, \"far_plane\": 6.0})\n \"\"\"parameters to instantiate scene collider with\"\"\"\n loss_coefficients: Dict[str, float] = to_immutable_dict({\"rgb_loss_coarse\": 1.0, \"rgb_loss_fine\": 1.0})\n \"\"\"parameters to instantiate density field with\"\"\"\n eval_num_rays_per_chunk: int = 4096\n \"\"\"specifies number of rays per chunk during eval\"\"\"" }, { "identifier": "Pipeline", "path": "nerfstudio/pipelines/base_pipeline.py", "snippet": "class Pipeline(nn.Module):\n \"\"\"The intent of this class is to provide a higher level interface for the Model\n that will be easy to use for our Trainer class.\n\n This class will contain high level functions for the model like getting the loss\n dictionaries and visualization code. It should have ways to get the next iterations\n training loss, evaluation loss, and generate whole images for visualization. Each model\n class should be 1:1 with a pipeline that can act as a standardized interface and hide\n differences in how each model takes in and outputs data.\n\n This class's function is to hide the data manager and model classes from the trainer,\n worrying about:\n 1) Fetching data with the data manager\n 2) Feeding the model the data and fetching the loss\n Hopefully this provides a higher level interface for the trainer to use, and\n simplifying the model classes, which each may have different forward() methods\n and so on.\n\n Args:\n config: configuration to instantiate pipeline\n device: location to place model and data\n test_mode:\n 'train': loads train/eval datasets into memory\n 'test': loads train/test dataset into memory\n 'inference': does not load any dataset into memory\n world_size: total number of machines available\n local_rank: rank of current machine\n\n Attributes:\n datamanager: The data manager that will be used\n model: The model that will be used\n \"\"\"\n\n # pylint: disable=abstract-method\n\n datamanager: DataManager\n _model: Model\n\n @property\n def model(self):\n \"\"\"Returns the unwrapped model if in ddp\"\"\"\n return module_wrapper(self._model)\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.model.device\n\n def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n model_state = {\n key.replace(\"_model.\", \"\"): value for key, value in state_dict.items() if key.startswith(\"_model.\")\n }\n pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith(\"_model.\")}\n self._model.load_state_dict(model_state, strict=strict)\n super().load_state_dict(pipeline_state, strict=False)\n\n @profiler.time_function\n def get_train_loss_dict(self, step: int):\n \"\"\"This function gets your training loss dict. 
This will be responsible for\n getting the next batch of data from the DataManager and interfacing with the\n Model class, feeding the data to the model's forward function.\n\n Args:\n step: current iteration step to update sampler if using DDP (distributed)\n \"\"\"\n if self.world_size > 1 and step:\n assert self.datamanager.train_sampler is not None\n self.datamanager.train_sampler.set_epoch(step)\n ray_bundle, batch = self.datamanager.next_train(step)\n model_outputs = self.model(ray_bundle, batch)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n\n return model_outputs, loss_dict, metrics_dict\n\n @profiler.time_function\n def get_eval_loss_dict(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n if self.world_size > 1:\n assert self.datamanager.eval_sampler is not None\n self.datamanager.eval_sampler.set_epoch(step)\n ray_bundle, batch = self.datamanager.next_eval(step)\n model_outputs = self.model(ray_bundle, batch)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n self.train()\n return model_outputs, loss_dict, metrics_dict\n\n @abstractmethod\n @profiler.time_function\n def get_eval_image_metrics_and_images(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n\n @abstractmethod\n @profiler.time_function\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n \"\"\"Iterate over all the images in the eval dataset and get the average.\"\"\"\n\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: pre-trained model state dict\n step: training step of the loaded checkpoint\n \"\"\"\n\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n \"\"\"Returns the training callbacks from both the Dataloader and the Model.\"\"\"\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Get the param groups for the pipeline.\n\n Returns:\n A list of dictionaries containing the pipeline's param groups.\n \"\"\"" }, { "identifier": "VanillaPipelineConfig", "path": "nerfstudio/pipelines/base_pipeline.py", "snippet": "class VanillaPipelineConfig(cfg.InstantiateConfig):\n \"\"\"Configuration for pipeline instantiation\"\"\"\n\n _target: Type = field(default_factory=lambda: VanillaPipeline)\n \"\"\"target class to instantiate\"\"\"\n datamanager: DataManagerConfig = VanillaDataManagerConfig()\n \"\"\"specifies the datamanager config\"\"\"\n model: ModelConfig = ModelConfig()\n \"\"\"specifies the model config\"\"\"" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n 
image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" }, { "identifier": "profiler", "path": "nerfstudio/utils/profiler.py", "snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:" }, { "identifier": "SpaceTimeDataManager", "path": "MSTH/datamanager.py", "snippet": "class SpaceTimeDataManager(DataManager):\n train_dataset: Union[VideoDatasetAllCached, VideoDatasetAllCachedUint8]\n eval_dataset: Union[VideoDatasetAllCached, VideoDatasetAllCachedUint8]\n train_dataparser_outputs: VideoDataParserOutputs\n train_pixel_sampler: Union[PixelTimeUniformSampler, PixelTimeSampler, SpatioTemporalSampler]\n eval_pixel_sampler: Union[PixelTimeUniformSampler, PixelTimeSampler]\n\n def __init__(\n self,\n config: SpaceTimeDataManagerConfig,\n device: Union[torch.device, str] = \"cpu\",\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n **kwargs,\n ):\n self.train_count = 0\n self.config = config\n self.device = device\n self.world_size = world_size\n self.local_rank = local_rank\n self.sampler = None\n self.test_mode = test_mode\n self.test_split = \"test\" if test_mode in [\"test\", \"inference\"] else \"val\"\n self.dataparser_config = self.config.dataparser\n if self.config.data is not None:\n self.config.dataparser.data = Path(self.config.data)\n else:\n self.config.data = self.config.dataparser.data\n self.dataparser = self.dataparser_config.setup()\n self.train_dataparser_outputs = self.dataparser.get_dataparser_outputs(split=\"train\")\n\n self.train_dataset = self.create_train_dataset()\n self.eval_dataset = self.create_eval_dataset()\n super().__init__()\n\n def create_train_dataset(self):\n if not self.config.use_uint8:\n return VideoDatasetAllCached(\n self.train_dataparser_outputs, self.config.camera_res_scale_factor, self.config.mask_extend_radius\n )\n else:\n return VideoDatasetAllCachedUint8(\n self.train_dataparser_outputs,\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n use_median=self.config.use_median,\n )\n\n def create_eval_dataset(self):\n if not self.config.use_uint8:\n return VideoDatasetAllCached(\n self.dataparser.get_dataparser_outputs(split=self.test_split),\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n )\n else:\n return VideoDatasetAllCachedUint8(\n self.dataparser.get_dataparser_outputs(split=self.test_split),\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n use_mask=False,\n )\n\n def setup_train(self):\n CONSOLE.print(\"Setting up for training\")\n # sampler_cls = spacetime_samplers[self.config.sampler]\n # sampler_args = {\"dataset\": self.train_dataset, \"num_rays_per_batch\": self.config.train_num_rays_per_batch}\n # sampler_args.update(self.config.sampler_extra_args)\n # self.train_pixel_sampler = sampler_cls(**sampler_args)\n # if not self.config.use_stratified_pixel_sampler:\n sampler_args = {\n \"dataset\": self.train_dataset,\n \"num_rays_per_batch\": self.config.train_num_rays_per_batch,\n \"device\": self.device,\n }\n\n if 
self.config.spatial_temporal_sampler == \"uniform\":\n if not self.config.use_all_uniform_sampler:\n self.train_pixel_sampler = PixelTimeUniformSampler(\n self.train_dataset, self.config.train_num_rays_per_batch\n )\n else:\n self.train_pixel_sampler = PixelTimeUniformSampler_origin(\n self.train_dataset, self.config.train_num_rays_per_batch\n )\n\n elif self.config.spatial_temporal_sampler == \"stratified\":\n self.train_pixel_sampler = PixelTimeSampler(\n self.train_dataset,\n self.config.train_num_rays_per_batch,\n static_dynamic_ratio=self.config.static_dynamic_sampling_ratio,\n static_dynamic_ratio_end=self.config.static_dynamic_sampling_ratio_end,\n total_steps=self.config.static_ratio_decay_total_steps,\n )\n elif self.config.spatial_temporal_sampler == \"st\":\n extra_args = dict(\n static_dynamic_ratio=self.config.static_dynamic_sampling_ratio,\n static_dynamic_ratio_end=self.config.static_dynamic_sampling_ratio_end,\n total_steps=self.config.static_ratio_decay_total_steps,\n n_time_for_dynamic=self.config.n_time_for_dynamic,\n use_temporal_weight=self.config.use_temporal_weight,\n )\n sampler_args.update(extra_args)\n del sampler_args[\"device\"]\n\n sampler_type = spacetime_samplers[self.config.train_sampler_type]\n sampler_args.update(spacetime_samplers_default_args[self.config.train_sampler_type])\n sampler_args.update(self.config.train_sampler_args)\n\n self.train_pixel_sampler = sampler_type(**sampler_args)\n\n self.train_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.train_dataset.cameras.size, device=self.device\n )\n self.train_ray_generator = RayGenerator(\n self.train_dataset.cameras.to(self.device),\n self.train_camera_optimizer,\n )\n\n def setup_eval(self):\n CONSOLE.print(\"Setting up for evaluating\")\n self.eval_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.eval_dataset.cameras.size, device=self.device\n )\n\n self.eval_pixel_sampler = PixelTimeUniformSampler(\n self.eval_dataset,\n self.config.eval_num_rays_per_batch,\n )\n\n self.eval_ray_generator = RayGenerator(self.eval_dataset.cameras.to(self.device), self.eval_camera_optimizer)\n\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n if self.config.use_static_dynamic_ratio_anealing:\n assert self.ratio is not None\n assert isinstance(self.train_pixel_sampler, PixelTimeSampler)\n # self.train_pixel_sampler.set_static_dynamic_ratio(self.ratio(step))\n self.train_count += 1\n batch = self.train_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n ray_bundle.times = batch[\"time\"][..., None]\n\n return ray_bundle, batch\n\n def next_eval(self, step: int) -> Tuple[RayBundle, Dict]:\n self.eval_count += 1\n batch = self.eval_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n ray_bundle.times = batch[\"time\"][..., None]\n\n return ray_bundle, batch\n\n def get_train_rays_per_batch(self) -> int:\n return self.config.train_num_rays_per_batch\n\n def get_eval_rays_per_batch(self) -> int:\n return self.config.eval_num_rays_per_batch\n\n def get_datapath(self) -> Path:\n return self.config.dataparser.data\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n return {}\n\n # def next_eval_image(self, idx: int):\n # # TODO: impl this\n # image_idx = idx\n # if idx < 0:\n # image_idx = random.randint(0, self.eval_dataset.num_cams - 1)\n # camera_ray_bundle = self.eval_ray_generator.cameras.generate_rays(camera_indices=image_idx, 
keep_shape=True)\n # batch = self.eval_dataset[image_idx]\n # return image_idx, camera_ray_bundle, batch\n\n def next_eval_image(self, frame_idx: int):\n camera_ray_bundle = self.eval_ray_generator.cameras.generate_rays(camera_indices=0, keep_shape=True)\n batch = self.eval_dataset.frames[0, frame_idx]\n if batch.dtype != torch.float32:\n batch = batch.to(torch.float32) / 255.0\n batch = {\"image\": batch}\n batch[\"time\"] = frame_idx / self.eval_dataset.num_frames\n camera_ray_bundle.times = torch.zeros_like(camera_ray_bundle.origins[..., :1]).to(camera_ray_bundle.origins)\n camera_ray_bundle.times.fill_(batch[\"time\"])\n\n return frame_idx, camera_ray_bundle, batch\n\n def next_eval_image_incremental(self, frame_idx, camera_ray_bundle):\n batch = self.eval_dataset.frames[0, frame_idx]\n if batch.dtype != torch.float32:\n batch = batch.to(torch.float32) / 255.0\n batch = {\"image\": batch}\n batch[\"time\"] = frame_idx / self.eval_dataset.num_frames\n camera_ray_bundle.times.fill_(batch[\"time\"])\n\n return frame_idx, camera_ray_bundle, batch\n\n # def eval_all_images()" }, { "identifier": "SpaceTimeDataManagerConfig", "path": "MSTH/datamanager.py", "snippet": "class SpaceTimeDataManagerConfig(DataManagerConfig):\n _target: Type = field(default_factory=lambda: SpaceTimeDataManager)\n dataparser: VideoDataParserConfig = VideoDataParserConfig()\n\n collate_fn = staticmethod(nerfstudio_collate)\n \"\"\"Specifies the collate function to use for the train and eval dataloaders.\"\"\"\n camera_res_scale_factor: float = 1.0\n train_num_rays_per_batch: int = 1024\n eval_num_rays_per_batch: int = 1024\n\n camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig()\n dataset_mask_precomputed_path: Path = Path(\"/data/machine/data/flame_salmon_videos_2/masks.pt\")\n\n mask_extend_radius: int = 5\n use_uint8: bool = True\n use_isg_sampler: bool = True\n static_dynamic_sampling_ratio_end: float = 1\n static_ratio_decay_total_steps: int = 6000\n use_all_uniform_sampler: bool = False\n\n use_stratified_pixel_sampler: bool = False # deprecated\n spatial_temporal_sampler: Literal[\"uniform\", \"stratified\", \"st\"] = \"stratified\"\n use_static_dynamic_ratio_anealing: bool = False\n static_dynamic_sampling_ratio: float = 1.0\n ratio_anealing_start: int = 0\n ratio_anealing_end: int = 20000\n initial_ratio: float = 50\n final_ratio: float = 10\n # n_time_for_dynamic: int = 1 # parameters for spatial_temporal sampler, 1 is equal to stratified sampler\n n_time_for_dynamic: Callable[\n [int], float\n ] = lambda x: 1 # parameters for spatial_temporal sampler, 1 is equal to stratified sampler\n use_temporal_weight: str = \"none\"\n use_median: bool = False\n\n train_sampler_type: str = \"spatio\"\n train_sampler_args: Type = field(default_factory=lambda: {})" }, { "identifier": "VideoDataManager", "path": "MSTH/datamanager.py", "snippet": "class VideoDataManager(DataManager):\n train_dataset: VideoDataset\n eval_dataset: VideoDataset\n train_dataparser_outputs: VideoDataParserOutputs\n train_pixel_sampler: Optional[CompletePixelSampler] = None\n eval_dynamic_pixel_sampler: CompletePixelSampler\n eval_all_pixel_sampler: CompletePixelSampler\n\n def __init__(\n self,\n config: VideoDataManagerConfig,\n device: Union[torch.device, str] = \"cpu\",\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n **kwargs,\n ):\n self.static_train_count = 0\n self.dynamic_train_count = 0\n self.dynamic_train_count_inverse = 0\n self.config = config\n 
self.device = device\n self.world_size = world_size\n self.local_rank = local_rank\n self.sampler = None\n self.test_mode = test_mode\n self.test_split = \"test\" if test_mode in [\"test\", \"inference\"] else \"val\"\n self.dataparser_config = self.config.dataparser\n if self.config.data is not None:\n self.config.dataparser.data = Path(self.config.data)\n else:\n self.config.data = self.config.dataparser.data\n self.dataparser = self.dataparser_config.setup()\n self.train_dataparser_outputs = self.dataparser.get_dataparser_outputs(split=\"train\")\n\n self.train_dataset = self.create_train_dataset()\n self.eval_dataset = self.create_eval_dataset()\n super().__init__()\n\n def create_train_dataset(self) -> VideoDataset:\n return VideoDataset(\n self.train_dataparser_outputs,\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n self.config.next_n_frames,\n )\n\n def create_eval_dataset(self) -> VideoDataset:\n return VideoDataset(\n self.dataparser.get_dataparser_outputs(split=self.test_split),\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n self.config.next_n_frames,\n )\n\n def _get_train_pixel_sampler(self, cur_frame_data):\n return CompletePixelSampler(self.config.train_num_rays_per_batch, cur_frame_data)\n\n @property\n def train_num_rays_per_batch(self):\n return self.config.train_num_rays_per_batch\n\n # def _get_eval_pixel_sampler(self):\n\n def setup_train(self):\n # TODO: finish this\n CONSOLE.print(\"Setting up stuffs for training\")\n self.train_pixel_sampler = self._get_train_pixel_sampler(self.train_dataset.get_all_data(self.device))\n self.train_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.train_dataset.cameras.size, device=self.device\n )\n self.train_ray_generator = RayGenerator(\n self.train_dataset.cameras.to(self.device),\n self.train_camera_optimizer,\n )\n\n def setup_eval(self):\n \"\"\"set up eval preparation\"\"\"\n # TODO: finish here\n CONSOLE.print(\"Setting up stuffs for evaluating\")\n # self.eval_pixel_sampler = self._get_train_pixel_sampler(self.eval_dataset.get_all_data(self.device))\n self.eval_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.eval_dataset.cameras.size, device=self.device\n )\n self.eval_dynamic_pixel_sampler = CompletePixelSampler(\n self.config.eval_num_rays_per_batch, self.eval_dataset.get_all_data(self.device)\n )\n self.eval_all_pixel_sampler = CompletePixelSampler(\n self.config.eval_num_rays_per_batch, self.eval_dataset.get_all_data(self.device), use_mask=False\n )\n assert self.eval_dataset.cur_frame == 1\n assert self.eval_all_pixel_sampler is not None\n assert self.eval_dynamic_pixel_sampler is not None\n\n self.eval_ray_generator = RayGenerator(self.eval_dataset.cameras.to(self.device), self.eval_camera_optimizer)\n\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Return next batch\"\"\"\n self.dynamic_train_count += 1\n assert self.train_pixel_sampler is not None\n\n batch = self.train_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n return ray_bundle, batch\n\n def next_train_inverse(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Return next batch\"\"\"\n self.dynamic_train_count_inverse += 1\n assert self.train_pixel_sampler is not None\n\n batch = self.train_pixel_sampler.sample_inverse()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_all(self, step: 
int):\n \"\"\"Return next eval batch\"\"\"\n ## TODO: impl here\n self.eval_count += 1\n assert self.eval_all_pixel_sampler is not None\n\n batch = self.eval_all_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_dynamic(self, step: int):\n assert self.eval_dynamic_pixel_sampler is not None\n\n batch = self.eval_dynamic_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_image(self, idx: int):\n # TODO: impl this\n image_idx = idx\n if idx < 0:\n image_idx = random.randint(0, self.eval_dataset.num_cams - 1)\n camera_ray_bundle = self.eval_ray_generator.cameras.generate_rays(camera_indices=image_idx, keep_shape=True)\n batch = self.eval_dataset[image_idx]\n return image_idx, camera_ray_bundle, batch\n\n def get_train_rays_per_batch(self) -> int:\n return self.config.train_num_rays_per_batch\n\n def get_eval_rays_per_batch(self) -> int:\n return self.config.eval_num_rays_per_batch\n\n def get_datapath(self) -> Path:\n return self.config.dataparser.data\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n return {}\n\n def tick(self, tick_n_frames=False):\n # TODO: make sure have done everything needed to tick\n if tick_n_frames:\n self.train_dataset.tick_n_frames()\n self.eval_dataset.tick_n_frames()\n else:\n self.train_dataset.tick()\n self.eval_dataset.tick()\n assert self.train_pixel_sampler is not None\n self.train_pixel_sampler.set_batch(self.train_dataset.get_all_data(self.device))\n assert self.eval_all_pixel_sampler is not None\n assert self.eval_dynamic_pixel_sampler is not None\n self.eval_all_pixel_sampler.set_batch(self.eval_dataset.get_all_data(self.device))\n self.eval_dynamic_pixel_sampler.set_batch(self.eval_dataset.get_all_data(self.device))" }, { "identifier": "VideoDataManagerConfig", "path": "MSTH/datamanager.py", "snippet": "class VideoDataManagerConfig(DataManagerConfig):\n \"\"\"Video Data Manager config\"\"\"\n\n _target: Type = field(default_factory=lambda: VideoDataManager)\n dataparser: VideoDataParserConfig = VideoDataParserConfig()\n\n collate_fn = staticmethod(nerfstudio_collate)\n \"\"\"Specifies the collate function to use for the train and eval dataloaders.\"\"\"\n camera_res_scale_factor: float = 1.0\n train_num_rays_per_batch: int = 1024\n eval_num_rays_per_batch: int = 1024\n\n camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig()\n\n mask_extend_radius: int = 5\n next_n_frames: int = 1\n \"\"\"mask extend radius for gaussian filter\"\"\"" }, { "identifier": "VideoFeatureDataManager", "path": "MSTH/datamanager.py", "snippet": "class VideoFeatureDataManager(DataManager):\n train_dataset: VideoDatasetWithFeature\n eval_dataset: VideoDatasetWithFeature\n train_dataparser_outputs: VideoDataParserOutputs\n train_pixel_sampler: Optional[CompletePixelSampler] = None\n eval_dynamic_pixel_sampler: CompletePixelSampler\n eval_all_pixel_sampler: CompletePixelSampler\n\n def __init__(\n self,\n config: VideoFeatureDataManagerConfig,\n device: Union[torch.device, str] = \"cpu\",\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n **kwargs,\n ):\n self.static_train_count = 0\n self.dynamic_train_count = 0\n self.config = config\n self.device = device\n self.world_size = world_size\n self.local_rank = local_rank\n self.sampler = None\n self.test_mode = test_mode\n self.test_split 
= \"test\" if test_mode in [\"test\", \"inference\"] else \"val\"\n self.dataparser_config = self.config.dataparser\n if self.config.data is not None:\n self.config.dataparser.data = Path(self.config.data)\n else:\n self.config.data = self.config.dataparser.data\n self.dataparser = self.dataparser_config.setup()\n self.train_dataparser_outputs = self.dataparser.get_dataparser_outputs(split=\"train\")\n\n self.train_dataset = self.create_train_dataset()\n self.eval_dataset = self.create_eval_dataset()\n super().__init__()\n\n def create_train_dataset(self) -> VideoDatasetWithFeature:\n return VideoDatasetWithFeature(\n self.train_dataparser_outputs,\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n 1,\n self.config.pretrained_path,\n self.config.fe_device,\n )\n\n def create_eval_dataset(self) -> VideoDatasetWithFeature:\n return VideoDatasetWithFeature(\n self.dataparser.get_dataparser_outputs(split=self.test_split),\n self.config.camera_res_scale_factor,\n self.config.mask_extend_radius,\n 1,\n self.config.pretrained_path,\n self.config.fe_device,\n )\n\n def _get_train_pixel_sampler(self, cur_frame_data):\n return CompletePixelSampler(self.config.train_num_rays_per_batch, cur_frame_data)\n\n @property\n def train_num_rays_per_batch(self):\n return self.config.train_num_rays_per_batch\n\n # def _get_eval_pixel_sampler(self):\n\n def setup_train(self):\n # TODO: finish this\n CONSOLE.print(\"Setting up stuffs for training\")\n self.train_pixel_sampler = self._get_train_pixel_sampler(self.train_dataset.get_all_data(self.device))\n self.train_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.train_dataset.cameras.size, device=self.device\n )\n self.train_ray_generator = RayGenerator(\n self.train_dataset.cameras.to(self.device),\n self.train_camera_optimizer,\n )\n\n def setup_eval(self):\n \"\"\"set up eval preparation\"\"\"\n # TODO: finish here\n CONSOLE.print(\"Setting up stuffs for evaluating\")\n # self.eval_pixel_sampler = self._get_train_pixel_sampler(self.eval_dataset.get_all_data(self.device))\n self.eval_camera_optimizer = self.config.camera_optimizer.setup(\n num_cameras=self.eval_dataset.cameras.size, device=self.device\n )\n self.eval_dynamic_pixel_sampler = CompletePixelSampler(\n self.config.eval_num_rays_per_batch, self.eval_dataset.get_all_data(self.device)\n )\n self.eval_all_pixel_sampler = CompletePixelSampler(\n self.config.eval_num_rays_per_batch, self.eval_dataset.get_all_data(self.device), use_mask=False\n )\n assert self.eval_dataset.cur_frame == 1\n assert self.eval_all_pixel_sampler is not None\n assert self.eval_dynamic_pixel_sampler is not None\n\n self.eval_ray_generator = RayGenerator(self.eval_dataset.cameras.to(self.device), self.eval_camera_optimizer)\n\n def next_train(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Return next batch\"\"\"\n self.dynamic_train_count += 1\n assert self.train_pixel_sampler is not None\n\n batch = self.train_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n return ray_bundle, batch\n\n def next_train_inverse(self, step: int) -> Tuple[RayBundle, Dict]:\n \"\"\"Return next batch\"\"\"\n self.dynamic_train_count_inverse += 1\n assert self.train_pixel_sampler is not None\n\n batch = self.train_pixel_sampler.sample_inverse()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.train_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_all(self, step: int):\n \"\"\"Return next eval batch\"\"\"\n 
## TODO: impl here\n self.eval_count += 1\n assert self.eval_all_pixel_sampler is not None\n\n batch = self.eval_all_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_dynamic(self, step: int):\n assert self.eval_dynamic_pixel_sampler is not None\n\n batch = self.eval_dynamic_pixel_sampler.sample()\n ray_indices = batch[\"indices\"]\n ray_bundle = self.eval_ray_generator(ray_indices)\n\n return ray_bundle, batch\n\n def next_eval_image(self, idx: int):\n # TODO: impl this\n image_idx = idx\n if idx < 0:\n image_idx = random.randint(0, self.eval_dataset.num_cams - 1)\n camera_ray_bundle = self.eval_ray_generator.cameras.generate_rays(camera_indices=image_idx, keep_shape=True)\n batch = self.eval_dataset[image_idx]\n return image_idx, camera_ray_bundle, batch\n\n def get_train_rays_per_batch(self) -> int:\n return self.config.train_num_rays_per_batch\n\n def get_eval_rays_per_batch(self) -> int:\n return self.config.eval_num_rays_per_batch\n\n def get_datapath(self) -> Path:\n return self.config.dataparser.data\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n return {}\n\n def tick(self, tick_n_frames=False):\n # TODO: make sure have done everything needed to tick\n if tick_n_frames:\n self.train_dataset.tick_n_frames()\n self.eval_dataset.tick_n_frames()\n else:\n self.train_dataset.tick()\n self.eval_dataset.tick()\n assert self.train_pixel_sampler is not None\n # extract features\n self.train_dataset.extract_cur_frame_feature()\n self.train_pixel_sampler.set_batch(self.train_dataset.get_all_data(self.device))\n assert self.eval_all_pixel_sampler is not None\n assert self.eval_dynamic_pixel_sampler is not None\n self.eval_all_pixel_sampler.set_batch(self.eval_dataset.get_all_data(self.device))\n self.eval_dynamic_pixel_sampler.set_batch(self.eval_dataset.get_all_data(self.device))\n\n @property\n def get_num_dynamic_rays(self):\n return self.train_pixel_sampler.all_indices.size(0)" }, { "identifier": "VideoFeatureDataManagerConfig", "path": "MSTH/datamanager.py", "snippet": "class VideoFeatureDataManagerConfig(DataManagerConfig):\n \"\"\"Video Data Manager config\"\"\"\n\n _target: Type = field(default_factory=lambda: VideoFeatureDataManager)\n dataparser: VideoDataParserConfig = VideoDataParserConfig()\n\n collate_fn = staticmethod(nerfstudio_collate)\n \"\"\"Specifies the collate function to use for the train and eval dataloaders.\"\"\"\n camera_res_scale_factor: float = 1.0\n train_num_rays_per_batch: int = 1024\n eval_num_rays_per_batch: int = 1024\n\n camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig()\n\n mask_extend_radius: int = 5\n pretrained_path = \"/data/czl/nerf/MSTH/MSTH/ibrnet/model_255000.pth\"\n next_n_frames: int = 1\n fe_device: str = \"cpu\"\n \"\"\"mask extend radius for gaussian filter\"\"\"" }, { "identifier": "get_render_cameras", "path": "MSTH/SpaceTimeHashing/render.py", "snippet": "def get_render_cameras(\n train_cameras: Cameras,\n ref_camera: Cameras,\n near,\n far,\n rads_scale=1.0,\n N_views=300,\n n_frames=300,\n downscale=1,\n offset=[-0.05, 0, 0],\n # offset=[0.0, 0, 0],\n):\n dt = 0.75\n c2ws_all = train_cameras.camera_to_worlds.numpy()\n c2w = average_poses(c2ws_all)\n print(c2w)\n up = np.array([0.0, 0.0, 1.0])\n\n # focal = 0.5\n focal = 1.0\n print(\"focal\", focal)\n\n zdelta = near * 0.2\n rads = np.array([0.45400773, 0.1343679, 0.05063616]) * rads_scale\n print(rads)\n render_poses = render_path_spiral(c2w, up, 
rads, focal, zdelta, zrate=0.9, N=n_frames)\n\n rposes = np.stack(render_poses, axis=0)\n # rposes[..., 0, 3] -= 0.2\n rposes[..., :3, 3] += np.array(offset)\n render_c2ws = torch.from_numpy(rposes)[..., :3, :].to(torch.float32)\n times = torch.linspace(0, n_frames - 1, n_frames) / n_frames\n times = times[..., None].to(torch.float32)\n\n H = int(get_elem(train_cameras.height))\n W = int(get_elem(train_cameras.width))\n cx = get_elem(train_cameras.cx)\n cy = get_elem(train_cameras.cy)\n print(\"H\", H)\n print(\"W\", W)\n print(\"cx\", cx)\n print(\"cy\", cy)\n\n render_cams = Cameras(\n render_c2ws,\n fx=get_elem(train_cameras.fx),\n fy=get_elem(train_cameras.fy),\n cx=cx,\n cy=cy,\n width=W,\n height=H,\n times=times,\n )\n render_cams.rescale_output_resolution(1 / downscale)\n\n return render_cams" }, { "identifier": "Timer", "path": "MSTH/utils.py", "snippet": "class Timer:\n recorder = defaultdict(list)\n\n def __init__(self, des=\"\", verbose=True, record=False) -> None:\n self.des = des\n self.verbose = verbose\n self.record = record\n\n def __enter__(self):\n return self\n self.start = time.time()\n self.start_cuda = torch.cuda.Event(enable_timing=True)\n self.end_cuda = torch.cuda.Event(enable_timing=True)\n self.start_cuda.record()\n return self\n\n def __exit__(self, *args):\n return\n self.end = time.time()\n self.end_cuda.record()\n self.interval = self.end - self.start\n if self.verbose:\n torch.cuda.synchronize()\n print(f\"[cudasync]{self.des} consuming {self.start_cuda.elapsed_time(self.end_cuda)/1000.:.8f}\")\n\n print(f\"{self.des} consuming {self.interval:.8f}\")\n if self.record:\n Timer.recorder[self.des].append(self.interval)\n\n @staticmethod\n def show_recorder():\n pprint(Timer.recorder)" }, { "identifier": "get_chart", "path": "MSTH/utils.py", "snippet": "def get_chart(x, y):\n fig, ax = plt.subplots()\n #\n ax.plot(x, y)\n return ax" } ]
import random import typing import cv2 import imageio import numpy as np import torch import torch.distributed as dist import wandb from abc import abstractmethod from dataclasses import dataclass, field from time import time from typing import Any, Dict, List, Mapping, Optional, Type, Union, cast from rich.progress import ( BarColumn, MofNCompleteColumn, Progress, TextColumn, TimeElapsedColumn, ) from torch import nn from torch.nn import Parameter from torch.nn.parallel import DistributedDataParallel as DDP from torchvision.io import write_video from tqdm import tqdm, trange from typing_extensions import Literal from nerfstudio.configs import base_config as cfg from nerfstudio.data.datamanagers.base_datamanager import ( DataManager, DataManagerConfig, VanillaDataManager, VanillaDataManagerConfig, ) from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes from nerfstudio.models.base_model import Model, ModelConfig from nerfstudio.pipelines.base_pipeline import Pipeline, VanillaPipelineConfig from nerfstudio.utils import colormaps, profiler from MSTH.datamanager import ( SpaceTimeDataManager, SpaceTimeDataManagerConfig, VideoDataManager, VideoDataManagerConfig, VideoFeatureDataManager, VideoFeatureDataManagerConfig, ) from MSTH.SpaceTimeHashing.render import get_render_cameras from MSTH.utils import Timer, get_chart
16,369
from __future__ import annotations def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model: """ If DDP, then return the .module. Otherwise, return the model. """ if isinstance(ddp_or_model, DDP): return cast(Model, ddp_or_model.module) return ddp_or_model @dataclass class VideoPipelineConfig(cfg.InstantiateConfig): _target: Type = field(default_factory=lambda: VideoPipeline) datamanager: Union[
from __future__ import annotations def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model: """ If DDP, then return the .module. Otherwise, return the model. """ if isinstance(ddp_or_model, DDP): return cast(Model, ddp_or_model.module) return ddp_or_model @dataclass class VideoPipelineConfig(cfg.InstantiateConfig): _target: Type = field(default_factory=lambda: VideoPipeline) datamanager: Union[
VideoDataManagerConfig, VideoFeatureDataManagerConfig, DataManagerConfig
2
2023-10-26 04:39:15+00:00
24k
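Note: the code fields of the record above break off at "datamanager: Union[", and the record's next line continues with "VideoDataManagerConfig, VideoFeatureDataManagerConfig, DataManagerConfig". For readability, below is a minimal, hypothetical sketch of how that truncated dataclass could be completed. Only the `_target` line and the three Union members come from the record itself; the closing bracket, the `default_factory` default for `datamanager`, and the selection of imports from the record's import block are assumptions for illustration, not part of the data.

from dataclasses import dataclass, field
from typing import Type, Union

from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datamanagers.base_datamanager import DataManagerConfig
from MSTH.datamanager import (
    VideoDataManagerConfig,
    VideoFeatureDataManagerConfig,
)


@dataclass
class VideoPipelineConfig(cfg.InstantiateConfig):
    # Target pipeline class; VideoPipeline is assumed to be defined later in
    # the same source file, so the lazy default_factory only resolves the name
    # when an instance is actually created.
    _target: Type = field(default_factory=lambda: VideoPipeline)  # noqa: F821
    # The datamanager config may be any of the three variants named on the
    # record's next line; the default chosen here is an assumption.
    datamanager: Union[
        VideoDataManagerConfig, VideoFeatureDataManagerConfig, DataManagerConfig
    ] = field(default_factory=VideoDataManagerConfig)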
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 1\n self.device = torch.device(device)\n\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = n_samples\n\n self.data = {}\n self.process_molecules(\"raw_dataset\", n_samples, idx=0)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "ProcessedDoubleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedDoubleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=1,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 2\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 2)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1sthalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_N_idx_1sthalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag1_N_idx_2ndhalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_O_idx_2ndhalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1sthalf],\n self.hasN_set[key][frag1_N_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1sthalf],\n self.hasO_set[key][frag2_O_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTripleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedTripleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n 
self.process_molecules(\"frag3_data\", n_samples, idx=2)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n n1 = int(self.n_samples / 3)\n n2 = int(self.n_samples / 3)\n n3 = self.n_samples - n1 - n2\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag2_N_idx_1_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag3_F_idx_1_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag1_F_idx_2_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag2_O_idx_2_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag3_N_idx_2_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag1_N_idx_3_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag2_F_idx_3_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag3_O_idx_3_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n3,\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1_3],\n self.hasF_set[key][frag1_F_idx_2_3],\n self.hasN_set[key][frag1_N_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1_3],\n self.hasO_set[key][frag2_O_idx_2_3],\n self.hasF_set[key][frag2_F_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag3_data = {\n key: np.concatenate(\n [\n self.hasF_set[key][frag3_F_idx_1_3],\n self.hasN_set[key][frag3_N_idx_2_3],\n self.hasO_set[key][frag3_O_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTS1x", "path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for 
v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "EGNNDynamics", "path": "oa_reactdiff/dynamics/egnn_dynamics.py", "snippet": "class EGNNDynamics(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. 
Defaults to None.\n \"\"\"\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n def forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tuple[List[Tensor], Tensor]:\n r\"\"\"predict noise /mu.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tuple[List[Tensor], Tensor]: updated pos-h and edge attributes\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n h_final, pos_final, edge_attr_final = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n )\n vel = pos_final - pos\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in pos, resetting EGNN output to randn.\")\n vel = torch.randn_like(vel)\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in h, resetting EGNN output to randn.\")\n h_final = torch.randn_like(h_final)\n\n h_final = h_final[:, :-condition_dim]\n\n frag_index = self.compute_frag_index(n_frag_switch)\n xh_final = [\n torch.cat(\n [\n self.remove_mean_batch(\n vel[frag_index[ii] : frag_index[ii + 1]],\n combined_mask[frag_index[ii] : frag_index[ii + 1]],\n ),\n self.decoders[ii](h_final[frag_index[ii] : frag_index[ii + 1]]),\n ],\n dim=-1,\n )\n for ii, name in enumerate(self.fragment_names)\n ]\n\n # xh_final = self.enpose_pbc(xh_final)\n\n if edge_attr_final is None or edge_attr_final.size(1) <= max(1, self.dist_dim):\n edge_attr_final = None\n else:\n edge_attr_final = self.edge_decoder(edge_attr_final)\n return xh_final, edge_attr_final\n\n @staticmethod\n def enpose_pbc(xh: List[Tensor], magnitude=10.0) -> List[Tensor]:\n xrange = magnitude * 2\n xh = [torch.remainder(_xh + magnitude, xrange) - magnitude for _xh in xh]\n return xh\n\n @staticmethod\n def compute_frag_index(n_frag_switch: 
Tensor) -> np.ndarray:\n counts = [\n torch.where(n_frag_switch == ii)[0].numel()\n for ii in torch.unique(n_frag_switch)\n ]\n return np.concatenate([np.array([0]), np.cumsum(counts)])\n\n @torch.no_grad()\n def adjust_edge_attr_on_new_eij(\n self,\n edge_index: Tensor,\n edge_attr: Tensor,\n edge_index_new: Tensor,\n ) -> Tensor:\n r\"\"\"Get ready new edge attributes (e_ij) given old {ij, e_ij} and new {ij}\n\n Args:\n edge_index (Tensor): ij\n edge_attr (Tensor): e_ij\n edge_index_new (Tensor): new ij\n\n Raises:\n ValueError: finding multiple entries for the same ij pair\n\n Returns:\n Tensor: new e_ij\n \"\"\"\n edge_index_T = torch.transpose(edge_index, 1, 0)\n edge_index_new_T = torch.transpose(edge_index_new, 1, 0)\n\n edge_attr_new = []\n for _ind, ij in enumerate(edge_index_new_T):\n ind = torch.where((ij == edge_index_T).all(dim=1))[0]\n if ind.size(0) > 1:\n raise ValueError(f\"ind should only be 0 or 1, getting {ind}\")\n\n if ind.size(0) == 0:\n self.create_new_edge_attr(\n ind_new=_ind,\n ij_new=ij,\n edge_index_new_T=edge_index_new_T,\n edge_attr_new=edge_attr_new,\n edge_attr=edge_attr,\n )\n else:\n edge_attr_new.append(edge_attr[ind.item()].detach())\n return torch.stack(edge_attr_new, dim=0)\n\n @staticmethod\n def init_edge_attr(sample_edge_attr):\n r\"\"\"initialize edge attributes.\"\"\"\n return torch.rand_like(sample_edge_attr)\n\n def create_new_edge_attr(\n self,\n ind_new: Tensor,\n ij_new: Tensor,\n edge_index_new_T: Tensor,\n edge_attr_new: List[Tensor],\n edge_attr: Tensor,\n ) -> List[Tensor]:\n r\"\"\"Create new edge attrbution for ij that is not present in old connections\n\n Args:\n ind_new (Tensor): natural index of new ij\n ij_new (Tensor): new ij\n edge_index_new_T (Tensor): new edge indexes, [n_edge, 2]\n edge_attr_new (List[Tensor]): list of new edge attributes\n edge_attr (Tensor): old edge attributes\n\n Raises:\n ValueError: not ji found for ij in new indexes\n\n Returns:\n List[Tensor]: list of new edge attributes\n \"\"\"\n ij_new_reverse = ij_new[torch.tensor([1, 0])]\n ind_new_reverse = torch.where((ij_new_reverse == edge_index_new_T).all(dim=1))[\n 0\n ]\n print(ind_new_reverse)\n if ind_new_reverse.size(0) == 0:\n raise ValueError(f\"should always find a reverse ind.\")\n # print(ij_new, ind_new, ind_new_reverse)\n if ind_new_reverse.item() >= ind_new:\n edge_attr_new.append(self.init_edge_attr(edge_attr[0]))\n else:\n edge_attr_new.append(edge_attr_new[ind_new_reverse.item()])\n return edge_attr_new\n\n @staticmethod\n def remove_mean_batch(x, indices):\n mean = scatter_mean(x, indices, dim=0)\n x = x - mean[indices]\n return x" }, { "identifier": "Confidence", "path": "oa_reactdiff/dynamics/confidence.py", "snippet": "class Confidence(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n **kwargs,\n ) -> None:\n r\"\"\"Confindence score for generated samples.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position 
vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n model_config.update({\"for_conf\": True})\n update_pocket_coords = True\n condition_time = (True,)\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n hidden_channels = model_config[\"hidden_channels\"]\n self.readout = GatedMLP(\n in_dim=hidden_channels,\n out_dims=[hidden_channels, hidden_channels, 1],\n activation=\"swish\",\n bias=True,\n last_layer_no_activation=True,\n )\n\n def _forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tensor:\n r\"\"\"predict confidence.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tensor: binary probability of confidence fo each graph.\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n node_features = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n ) # (n_node, n_hidden)\n\n graph_features = scatter_mean(\n node_features,\n index=combined_mask,\n dim=0,\n ) # (n_system, n_hidden)\n conf = self.readout(graph_features)\n return conf.squeeze()\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n ):\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n xh = [\n torch.cat(\n 
[repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n pred = self._forward(\n xh=xh,\n edge_index=edge_index,\n t=torch.tensor([0]),\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None,\n )\n return pred" }, { "identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "Normalizer", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "class Normalizer(nn.Module):\n def __init__(\n self,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n pos_dim: int = 3,\n ) -> None:\n super().__init__()\n self.norm_values = norm_values\n self.norm_biases = norm_biases\n self.pos_dim = pos_dim\n\n def normalize(self, representations: List[Dict]) -> List[Dict]:\n for ii in range(len(representations)):\n for jj, feature_type in enumerate(FEATURE_MAPPING):\n representations[ii][feature_type] = (\n representations[ii][feature_type] - self.norm_biases[jj]\n ) / self.norm_values[jj]\n return representations\n\n def unnormalize(self, x: Tensor, ind: int) -> Tensor:\n return x * self.norm_values[ind] + self.norm_biases[ind]\n\n def unnormalize_z(self, z_combined: List[Tensor]) -> List[Tensor]:\n for ii in range(len(z_combined)):\n z_combined[ii][:, : self.pos_dim] = self.unnormalize(\n z_combined[ii][:, : self.pos_dim], 0\n )\n z_combined[ii][:, self.pos_dim : -1] = self.unnormalize(\n z_combined[ii][:, self.pos_dim : -1], 1\n )\n z_combined[ii][:, -1:] = self.unnormalize(z_combined[ii][:, -1:], 2)\n return z_combined" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "EnVariationalDiffusion", "path": "oa_reactdiff/diffusion/en_diffusion.py", "snippet": "class EnVariationalDiffusion(nn.Module):\n \"\"\"\n The E(n) Diffusion Module.\n \"\"\"\n\n def __init__(\n self,\n dynamics: EGNNDynamics,\n schdule: DiffSchedule,\n normalizer: Normalizer,\n size_histogram: Optional[Dict] = None,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n fixed_idx: Optional[List] = None,\n ):\n super().__init__()\n assert loss_type in {\"vlb\", \"l2\"}\n\n self.dynamics = dynamics\n self.schedule = schdule\n self.normalizer = normalizer\n self.size_histogram = size_histogram\n self.loss_type = loss_type\n self.pos_only = pos_only\n self.fixed_idx = fixed_idx or []\n\n self.pos_dim = 
dynamics.pos_dim\n self.node_nfs = dynamics.node_nfs\n self.fragment_names = dynamics.fragment_names\n self.T = schdule.gamma_module.timesteps\n self.norm_values = normalizer.norm_values\n self.norm_biases = normalizer.norm_biases\n\n # ------ FORWARD PASS ------\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n return_pred: bool = False,\n ):\n r\"\"\"\n Computes the loss and NLL terms.\n\n #TODO: edge_attr not considered at all\n \"\"\"\n num_sample = representations[0][\"size\"].size(0)\n n_nodes = torch.stack(\n [repr[\"size\"] for repr in representations],\n dim=0,\n ).sum(dim=0)\n device = representations[0][\"pos\"].device\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n # Normalize data, take into account volume change in x.\n representations = self.normalizer.normalize(representations)\n\n # Likelihood change due to normalization\n delta_log_px = self.delta_log_px(n_nodes.sum())\n\n # Sample a timestep t for each example in batch\n # At evaluation time, loss_0 will be computed separately to decrease\n # variance in the estimator (costs two forward passes)\n lowest_t = 0 if self.training else 1\n t_int = torch.randint(\n lowest_t, self.T + 1, size=(num_sample, 1), device=device\n ).float()\n s_int = t_int - 1 # previous timestep\n\n # Masks: important to compute log p(x | z0).\n t_is_zero = (t_int == 0).float()\n t_is_not_zero = 1 - t_is_zero\n\n # Normalize t to [0, 1]. Note that the negative\n # step of s will never be used, since then p(x | z0) is computed.\n s = s_int / self.T\n t = t_int / self.T\n\n # Compute gamma_s and gamma_t via the network.\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s), representations[0][\"pos\"]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t), representations[0][\"pos\"]\n )\n\n # Concatenate x, and h[categorical].\n xh = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n # Find noised representation\n z_t, eps_xh = self.noised_representation(xh, masks, gamma_t)\n\n # Neural net prediction.\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z_t,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n if return_pred:\n return eps_xh, net_eps_xh\n\n # TODO: LJ term not implemented\n # xh_lig_hat = self.xh_given_zt_and_epsilon(z_t_lig, net_out_lig, gamma_t,\n # ligand['mask'])\n if self.pos_only:\n for ii in range(len(masks)):\n net_eps_xh[ii][:, self.pos_dim :] = torch.zeros_like(\n net_eps_xh[ii][:, self.pos_dim :],\n device=device,\n )\n # Compute the L2 error.\n error_t: List[Tensor] = [\n utils.sum_except_batch(\n (eps_xh[ii] - net_eps_xh[ii]) ** 2,\n masks[ii],\n dim_size=num_sample,\n )\n for ii in range(len(masks))\n ] # TODO: no edge_attr contribution\n\n # Compute weighting with SNR: (1 - SNR(s-t)) for epsilon parametrization\n SNR_weight = (1 - self.schedule.SNR(gamma_s - gamma_t)).squeeze(1)\n assert error_t[0].size() == SNR_weight.size()\n\n # The _constants_ depending on sigma_0 from the\n # cross entropy term E_q(z0 | x) [log p(x | z0)].\n neg_log_constants = -self.log_constants_p_x_given_z0(\n n_nodes=n_nodes, 
device=device\n )\n\n # The KL between q(zT | x) and p(zT) = Normal(0, 1).\n # Should be close to zero.\n # kl_prior = self.kl_prior_with_pocket(\n # xh_lig, xh_pocket, ligand['mask'], pocket['mask'],\n # ligand['size'] + pocket['size'])\n # TODO: approximate KL prior with zero now, which should not influence training.\n kl_prior = torch.zeros_like(neg_log_constants)\n\n if self.training:\n # Computes the L_0 term (even if gamma_t is not actually gamma_0)\n # and this will later be selected via masking.\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_t,\n eps_xh=eps_xh,\n net_eps_xh=net_eps_xh,\n gamma_t=gamma_t,\n epsilon=1e-10,\n )\n loss_0_x = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[0]\n ]\n loss_0_cat = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[1]\n ]\n loss_0_charge = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n # apply t_is_zero mask\n error_t = [_error_t * t_is_not_zero.squeeze() for _error_t in error_t]\n\n else:\n # Compute noise values for t = 0.\n t_zeros = torch.zeros_like(s)\n gamma_0 = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_zeros), representations[0][\"pos\"]\n )\n\n # Sample z_0 given x, h for timestep t, from q(z_t | x, h)\n z_0, eps_0_xh = self.noised_representation(xh, masks, gamma_0)\n net_eps_0_xh, net_eps_0_edge_attr = self.dynamics(\n xh=z_0,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_0,\n eps_xh=eps_0_xh,\n net_eps_xh=net_eps_0_xh,\n gamma_t=gamma_0,\n epsilon=1e-10,\n )\n loss_0_x = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[0]]\n loss_0_cat = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[1]]\n loss_0_charge = [\n -_log_p_fragment for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n loss_terms = {\n \"delta_log_px\": delta_log_px,\n \"error_t\": error_t,\n \"SNR_weight\": SNR_weight,\n \"loss_0_x\": loss_0_x,\n \"loss_0_cat\": loss_0_cat,\n \"loss_0_charge\": loss_0_charge,\n \"neg_log_constants\": neg_log_constants,\n \"kl_prior\": kl_prior,\n \"log_pN\": torch.zeros_like(kl_prior),\n \"t_int\": t_int.squeeze(),\n \"net_eps_xh\": net_eps_xh,\n \"eps_xh\": eps_xh,\n }\n return loss_terms\n\n def delta_log_px(self, num_nodes):\n return -self.subspace_dimensionality(num_nodes) * np.log(self.norm_values[0])\n\n def subspace_dimensionality(self, input_size):\n r\"\"\"\n Compute the dimensionality on translation-invariant linear subspace\n where distributions on x are defined.\n \"\"\"\n return (input_size - 1) * self.pos_dim\n\n def noised_representation(\n self,\n xh: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n ) -> Tuple[List[Tensor], List[Tensor]]:\n # Compute alpha_t and sigma_t from gamma.\n alpha_t = self.schedule.alpha(gamma_t, xh[0])\n sigma_t = self.schedule.sigma(gamma_t, xh[0])\n\n # Sample zt ~ Normal(alpha_t x, sigma_t)\n eps_xh = self.sample_combined_position_feature_noise(masks)\n\n # Sample z_t given x, h for timestep t, from q(z_t | x, h)\n z_t = [\n alpha_t[masks[ii]] * xh[ii] + sigma_t[masks[ii]] * eps_xh[ii]\n for ii in range(len(masks))\n ]\n\n return z_t, eps_xh\n\n def sample_combined_position_feature_noise(\n self,\n masks: List[Tensor],\n ) -> 
List[Tensor]:\n r\"\"\"\n Samples mean-centered normal noise for z_x, and standard normal noise for z_h.\n Note that we only need to put the center of gravity of *each fragment* to the origin.\n \"\"\"\n eps_xh = []\n for ii, mask in enumerate(masks):\n _eps_x = utils.sample_center_gravity_zero_gaussian_batch(\n size=(len(mask), self.pos_dim),\n indices=[mask],\n )\n _eps_h = utils.sample_gaussian(\n size=(len(mask), self.node_nfs[ii] - self.pos_dim),\n device=mask.device,\n )\n if self.pos_only:\n _eps_h = torch.zeros_like(_eps_h, device=mask.device)\n eps_xh.append(torch.cat([_eps_x, _eps_h], dim=1))\n for idx in self.fixed_idx:\n eps_xh[idx] = torch.zeros_like(eps_xh[idx], device=mask.device)\n return eps_xh\n\n def log_constants_p_x_given_z0(self, n_nodes, device):\n r\"\"\"Computes p(x|z0).\"\"\"\n\n batch_size = len(n_nodes)\n degrees_of_freedom_x = self.subspace_dimensionality(n_nodes).to(device)\n\n zeros = torch.zeros((batch_size, 1), device=device)\n gamma_0 = self.schedule.gamma_module(zeros)\n\n # Recall that sigma_x = sqrt(sigma_0^2 / alpha_0^2) = SNR(-0.5 gamma_0).\n log_sigma_x = 0.5 * gamma_0.view(batch_size)\n return degrees_of_freedom_x * (-log_sigma_x - 0.5 * np.log(2 * np.pi))\n\n def kl_prior(self):\n return NotImplementedError\n\n @staticmethod\n def gaussian_KL(q_mu_minus_p_mu_squared, q_sigma, p_sigma, d):\n \"\"\"Computes the KL distance between two normal distributions.\n Args:\n q_mu_minus_p_mu_squared: Squared difference between mean of\n distribution q and distribution p: ||mu_q - mu_p||^2\n q_sigma: Standard deviation of distribution q.\n p_sigma: Standard deviation of distribution p.\n d: dimension\n Returns:\n The KL distance\n \"\"\"\n return (\n d * torch.log(p_sigma / q_sigma)\n + 0.5 * (d * q_sigma**2 + q_mu_minus_p_mu_squared) / (p_sigma**2)\n - 0.5 * d\n )\n\n def log_pxh_given_z0_without_constants(\n self,\n representations: List[Dict],\n z_t: List[Tensor],\n eps_xh: List[Tensor],\n net_eps_xh: List[Tensor],\n gamma_t: Tensor,\n epsilon: float = 1e-10,\n ) -> List[List[Tensor]]:\n # Compute sigma_0 and rescale to the integer scale of the data.\n # for pos\n log_p_x_given_z0_without_constants = [\n -0.5\n * (\n utils.sum_except_batch(\n (eps_xh[ii][:, : self.pos_dim] - net_eps_xh[ii][:, : self.pos_dim])\n ** 2,\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n )\n for ii in range(len(representations))\n ]\n\n # only keep first several elements\n z_t = [_z_t[:, : 3 + 5 + 1] for _z_t in z_t]\n for ii, repr in enumerate(representations):\n representations[ii][\"charge\"] = representations[ii][\"charge\"][:, :1]\n # for ohe of atom types\n sigma_0 = self.schedule.sigma(gamma_t, target_tensor=z_t[0])\n sigma_0_cat = sigma_0 * self.normalizer.norm_values[1]\n atoms = [\n self.normalizer.unnormalize(repr[\"one_hot\"], ind=1)\n for repr in representations\n ]\n est_atoms = [\n self.normalizer.unnormalize(_z_t[:, self.pos_dim : -1], ind=1)\n for _z_t in z_t\n ]\n centered_atoms = [_est_atoms - 1 for _est_atoms in est_atoms]\n log_ph_cat_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_atoms[ii] + 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_atoms[ii] - 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_probabilities = [\n _log_ph_cat_proportionals\n - torch.logsumexp(\n _log_ph_cat_proportionals,\n dim=1,\n keepdim=True,\n )\n for _log_ph_cat_proportionals in 
log_ph_cat_proportionals\n ]\n log_p_hcat_given_z0 = [\n utils.sum_except_batch(\n log_probabilities[ii] * atoms[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n # for atom charge\n sigma_0_charge = sigma_0 * self.normalizer.norm_values[2]\n charges = [\n self.normalizer.unnormalize(repr[\"charge\"], ind=2)\n for repr in representations\n ]\n est_charges = [\n self.normalizer.unnormalize(_z_t[:, -1:], ind=2).long() for _z_t in z_t\n ]\n for ii in range(len(representations)):\n assert charges[ii].size() == est_charges[ii].size()\n centered_charges = [\n charges[ii] - est_charges[ii] for ii in range(len(representations))\n ]\n log_ph_charge_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_charges[ii] + 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_charges[ii] - 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_p_hcharge_given_z0 = [\n utils.sum_except_batch(\n log_ph_charge_proportionals[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n log_p_h_given_z0 = [\n log_p_x_given_z0_without_constants,\n log_p_hcat_given_z0,\n log_p_hcharge_given_z0,\n ]\n return log_p_h_given_z0\n\n # ------ INVERSE PASS ------\n\n @torch.no_grad()\n def sample(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n timesteps: Optional[int] = None,\n h0: Optional[List[Tensor]] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert h0 is not None if self.pos_only else True\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.\n for s in reversed(range(0, timesteps)):\n s_array = torch.full((n_samples, 1), fill_value=s, device=zt_xh[0].device)\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n # print(s, zt_xh)\n\n zt_xh = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n # save frame\n if (s * return_frames) % timesteps == 0:\n idx = (s * return_frames) // timesteps\n out_samples[idx] = 
self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zs_given_zt(\n self,\n s: Tensor,\n t: Tensor,\n zt_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ):\n \"\"\"Samples from zs ~ p(zs | zt). Only used during sampling.\"\"\"\n gamma_s = self.schedule.gamma_module(s)\n gamma_t = self.schedule.gamma_module(t)\n\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zt_xh[0])\n\n sigma_s = self.schedule.sigma(gamma_s, target_tensor=zt_xh[0])\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=zt_xh[0])\n\n # Neural net prediction.\n combined_mask = torch.cat(masks)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=zt_xh,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_net_eps_xh[:, : self.pos_dim] for _net_eps_xh in net_eps_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n # Note: mu_{t->s} = 1 / alpha_{t|s} z_t - sigma_{t|s}^2 / sigma_t / alpha_{t|s} epsilon\n # follows from the definition of mu_{t->s} and Equ. 
(7) in the EDM paper\n mu = [\n zt_xh[ii] / alpha_t_given_s[masks[ii]]\n - net_eps_xh[ii] * (sigma2_t_given_s / alpha_t_given_s / sigma_t)[masks[ii]]\n for ii in range(len(zt_xh))\n ]\n\n # Compute sigma for p(zs | zt).\n sigma = sigma_t_given_s * sigma_s / sigma_t\n\n # Sample zs given the paramters derived from zt.\n zs_xh = self.sample_normal(mu=mu, sigma=sigma, masks=masks, fix_noise=fix_noise)\n\n # Project down to avoid numerical runaway of the center of gravity.\n for ii in range(len(masks)):\n zs_xh[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zs_xh[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zs_xh\n\n def sample_normal(\n self,\n mu: List[Tensor],\n sigma: Tensor,\n masks: List[Tensor],\n fix_noise: bool = False,\n ) -> List[Tensor]:\n r\"\"\"Samples from a Normal distribution.\"\"\"\n if fix_noise:\n # bs = 1 if fix_noise else mu.size(0)\n raise NotImplementedError(\"fix_noise option isn't implemented yet\")\n eps_xh = self.sample_combined_position_feature_noise(masks=masks)\n zs_xh = [mu[ii] + sigma[masks[ii]] * eps_xh[ii] for ii in range(len(masks))]\n return zs_xh\n\n def sample_p_xh_given_z0(\n self,\n z0_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n batch_size: int,\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ) -> Tuple[List[Tensor]]:\n \"\"\"Samples x ~ p(x|z0).\"\"\"\n t_zeros = torch.zeros(size=(batch_size, 1), device=z0_xh[0].device)\n gamma_0 = self.schedule.gamma_module(t_zeros)\n # Computes sqrt(sigma_0^2 / alpha_0^2)\n sigma_x = self.schedule.SNR(-0.5 * gamma_0)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z0_xh,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=torch.cat(masks),\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n # Compute mu for p(zs | zt).\n mu_x = self.compute_x_pred(\n net_eps_xh=net_eps_xh,\n zt_xh=z0_xh,\n gamma_t=gamma_0,\n masks=masks,\n )\n x0_xh = self.sample_normal(\n mu=mu_x, sigma=sigma_x, masks=masks, fix_noise=fix_noise\n )\n\n pos_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, : self.pos_dim], ii)\n for ii in range(len(masks))\n ]\n cat_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, self.pos_dim : -1], ii)\n for ii in range(len(masks))\n ]\n charge_0 = [\n torch.round(self.normalizer.unnormalize(x0_xh[ii][:, -1:], ii)).long()\n for ii in range(len(masks))\n ]\n\n cat_0 = [\n F.one_hot(torch.argmax(cat_0[ii], dim=1), self.node_nfs[ii] - 4).long()\n for ii in range(len(masks))\n ]\n return pos_0, cat_0, charge_0\n\n def compute_x_pred(\n self,\n net_eps_xh: List[Tensor],\n zt_xh: List[Tensor],\n gamma_t: Tensor,\n masks: List[Tensor],\n ) -> List[Tensor]:\n \"\"\"Commputes x_pred, i.e. the most likely prediction of x.\"\"\"\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=net_eps_xh[0])\n alpha_t = self.schedule.alpha(gamma_t, target_tensor=net_eps_xh[0])\n x_pred = [\n 1.0 / alpha_t[masks[ii]] * (zt_xh[ii] - sigma_t[masks[ii]] * net_eps_xh[ii])\n for ii in range(len(masks))\n ]\n return x_pred\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. 
Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n 
torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint_fixed(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n 
gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zt_given_zs(\n self,\n zs: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n gamma_s: Tensor,\n fix_noise: bool = False,\n ) -> List[Tensor]:\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zs[0])\n\n mu = [alpha_t_given_s[masks[ii]] * zs[ii] for ii in range(len(masks))]\n zt = self.sample_normal(\n mu=mu, sigma=sigma_t_given_s, masks=masks, fix_noise=fix_noise\n )\n\n for ii in range(len(masks)):\n zt[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zt[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zt" }, { "identifier": "average_over_batch_metrics", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def average_over_batch_metrics(batch_metrics: List[Dict], allowed: List = []):\n epoch_metrics = {}\n effective_batch = {}\n for ii, out in enumerate(batch_metrics):\n for k, v in out.items():\n if not (k in allowed or len(allowed) == 0):\n continue\n if ii == 0:\n epoch_metrics[k] = v\n effective_batch[k] = 1\n else:\n if not np.isnan(v):\n epoch_metrics[k] += v\n effective_batch[k] += 1\n for k in epoch_metrics:\n epoch_metrics[k] /= effective_batch[k]\n return epoch_metrics" }, { "identifier": "pretty_print", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def pretty_print(epoch, metric_dict, prefix=\"Train\"):\n out = f\"{prefix} epoch {epoch} \"\n for k, v in metric_dict.items():\n out += f\"{k} {v:.2f} \"\n print(out)" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" } ]
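The DiffSchedule snippet quoted in the context list above derives sigma and alpha from a scalar gamma via sigmoid and defines SNR as exp(-gamma). The following minimal sketch (not repository code; toy gamma values only) checks the two identities those definitions imply, alpha^2 + sigma^2 = 1 and SNR = alpha^2 / sigma^2:

```python
import torch

def sigma_from_gamma(gamma: torch.Tensor) -> torch.Tensor:
    # sigma = sqrt(sigmoid(gamma)), mirroring DiffSchedule.sigma
    return torch.sqrt(torch.sigmoid(gamma))

def alpha_from_gamma(gamma: torch.Tensor) -> torch.Tensor:
    # alpha = sqrt(sigmoid(-gamma)), mirroring DiffSchedule.alpha
    return torch.sqrt(torch.sigmoid(-gamma))

gamma = torch.linspace(-10.0, 10.0, steps=5)  # illustrative values
alpha = alpha_from_gamma(gamma)
sigma = sigma_from_gamma(gamma)

# sigmoid(-g) + sigmoid(g) = 1, so the noising is variance preserving.
assert torch.allclose(alpha**2 + sigma**2, torch.ones_like(gamma))

# SNR(gamma) = exp(-gamma) coincides with alpha^2 / sigma^2.
assert torch.allclose(torch.exp(-gamma), alpha**2 / sigma**2, rtol=1e-4)
```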
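EnVariationalDiffusion.noised_representation draws z_t = alpha_t * x + sigma_t * eps, and sample_p_zs_given_zt forms the reverse-step mean mu = z_t / alpha_{t|s} - eps_hat * sigma^2_{t|s} / (alpha_{t|s} * sigma_t). The scalar sketch below uses assumed toy values for x, eps and the gammas (none of them come from the dataset) to show that, when the network recovers the true noise, this epsilon-parameterized mean equals the standard DDPM posterior mean of q(z_s | z_t, x):

```python
import torch

def alpha(gamma: torch.Tensor) -> torch.Tensor:
    return torch.sqrt(torch.sigmoid(-gamma))

def sigma(gamma: torch.Tensor) -> torch.Tensor:
    return torch.sqrt(torch.sigmoid(gamma))

# Toy gamma values for two timesteps s < t (illustrative only).
gamma_s, gamma_t = torch.tensor(-2.0), torch.tensor(0.5)
alpha_s, sigma_s = alpha(gamma_s), sigma(gamma_s)
alpha_t, sigma_t = alpha(gamma_t), sigma(gamma_t)

alpha_t_given_s = alpha_t / alpha_s
sigma2_t_given_s = sigma_t**2 - alpha_t_given_s**2 * sigma_s**2

# Forward noising, as in noised_representation: z_t = alpha_t * x + sigma_t * eps.
x, eps = torch.tensor(1.3), torch.tensor(-0.7)
z_t = alpha_t * x + sigma_t * eps

# Reverse-step mean in the epsilon parameterization (sample_p_zs_given_zt),
# assuming the network predicts the true noise eps.
mu_eps = z_t / alpha_t_given_s - eps * sigma2_t_given_s / (alpha_t_given_s * sigma_t)

# Standard DDPM posterior mean of q(z_s | z_t, x); the two expressions agree.
mu_posterior = (alpha_t_given_s * sigma_s**2 / sigma_t**2) * z_t + (
    alpha_s * sigma2_t_given_s / sigma_t**2
) * x

assert torch.allclose(mu_eps, mu_posterior, atol=1e-6)
```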
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
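For orientation, the sketch below shows how the diffusion components pulled in by this import block fit together, using only the constructor signatures quoted in the context list. It assumes an already-built EGNNDynamics instance named `egnn_dynamics` (its EGNN `model_config` is omitted here) and is not code taken from the repository:

```python
# Sketch only: composing the imported pieces into a denoising diffusion module.
gamma_module = PredefinedNoiseSchedule(
    noise_schedule="polynomial_2",  # the DDPMModule default
    timesteps=1000,
    precision=1e-5,
)
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=(1.0, 1.0, 1.0))
normalizer = Normalizer(
    norm_values=(1.0, 1.0, 1.0),
    norm_biases=(0.0, 0.0, 0.0),
    pos_dim=3,
)
ddpm = EnVariationalDiffusion(
    dynamics=egnn_dynamics,   # assumed pre-built EGNNDynamics instance
    schdule=schedule,         # the parameter is spelled "schdule" in the quoted signature
    normalizer=normalizer,
    loss_type="l2",
    pos_only=False,
)
```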
19881
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__()
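cropped_code opens with three module-level dispatch dictionaries. The hypothetical sketch below illustrates the lookup pattern they suggest; the data path is a placeholder, and only keyword arguments documented in the ProcessedTS1x snippet from the context list are used:

```python
# Hypothetical usage of the dispatch dictionaries; not repository code.
process_type = "TS1x"
dataset_cls = PROCESS_FUNC[process_type]  # -> ProcessedTS1x
npz_path = "data/transition1x_train" + FILE_TYPE[process_type]  # ".pkl" suffix (placeholder path)
train_set = dataset_cls(
    npz_path=npz_path,
    center=True,
    pad_fragments=0,
    device="cpu",
    zero_charge=False,
    remove_h=False,
    single_frag_only=True,
    position_key="positions",
)
```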
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__()
egnn_dynamics = EGNNDynamics(
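next_line records only the opening of the call the model must complete inside DDPMModule.__init__. Based on the EGNNDynamics signature quoted in the context list (gold_snippet_index points into that list) and the arguments available in the constructor, a plausible full call is sketched below; this is an inference for illustration, not the verified gold continuation from the repository:

```python
# Plausible continuation of the next_line above, inside DDPMModule.__init__;
# every keyword exists in the quoted EGNNDynamics signature and every value
# is a DDPMModule.__init__ argument.
egnn_dynamics = EGNNDynamics(
    model_config=model_config,
    fragment_names=fragment_names,
    node_nfs=node_nfs,
    edge_nf=edge_nf,
    condition_nf=condition_nf,
    pos_dim=pos_dim,
    update_pocket_coords=update_pocket_coords,
    condition_time=condition_time,
    edge_cutoff=edge_cutoff,
    model=model,
    enforce_same_encoding=enforce_same_encoding,
    source=source,
)
```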
4
2023-10-30 02:53:38+00:00
24k
nv-tlabs/pacer
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\"\n )\n\n def __init__(self, node_names, parent_indices, local_translation, local_xml_rotation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._local_xml_rotation = local_xml_rotation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n def __repr__(self):\n return (\n \"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n 
self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n )\n )\n\n def _indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args,\n **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args,\n **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_xml_rotation\"], *args,\n **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict(\n [\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n (\"local_xml_rotation\", tensor_to_dict(self._local_xml_rotation)),\n ]\n )\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n local_xml_rotation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n quat = np.fromstring(xml_node.attrib.get(\"quat\", \"1 0 0 0\"), dtype=float, sep=\" \")[[1, 2, 3, 0]]\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n local_xml_rotation.append(quat)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n torch.from_numpy(np.array(local_xml_rotation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n 
new_local_translation = torch.zeros(\n new_length, 3, dtype=self.local_translation.dtype\n )\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[\n tb_node_index, node_index, :\n ]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[\n self[tb_node_index]\n ]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... )\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. 
from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., : self.num_joints * 4].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 4))\n )\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? \n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[\n ..., self.num_joints * 4 : self.num_joints * 4 + 3\n ]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward kinemaitcs.\n \n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation.clone()\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n \n local_transformation[..., :4] = quat_mul(\n self.skeleton_tree._local_xml_rotation,\n local_transformation[..., :4])\n\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(\n local_transformation[..., node_index, :]\n )\n else:\n # Here to factor in the local xml rotation\n\n global_transformation.append(\n transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n )\n )\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return 
self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(\n self.global_transformation\n )\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[\n ..., node_index, :\n ]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(\n r=self.local_rotation, t=self.local_translation\n )\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. 
The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (\n tuple(self.tensor.shape[:-1])\n + (len(self.skeleton_tree),)\n + tuple(self.skeleton_tree.local_translation.shape[-1:])\n )\n local_translation = self.skeleton_tree.local_translation.broadcast_to(\n *broadcast_shape\n ).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (\n self.global_translation - self.root_translation.unsqueeze(-1)\n )\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (\n quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation\n )\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (\n global_positions[:, left_shoulder_index].numpy()\n - global_positions[:, right_shoulder_index].numpy()\n + global_positions[:, left_hip_index].numpy()\n - global_positions[:, right_hip_index].numpy()\n )\n side_direction = (\n side_direction\n / np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(\n forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\"\n )\n forward_direction = (\n forward_direction\n / np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n # Tensorbackend: local rotation and translation, rotation is is in quat 33 * 4 + 3\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(\n *(state_shape + (-1,))\n )\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(\n cls: 
Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ]\n )\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (\n r.dim() > 0\n ), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n return cls(\n SkeletonState._to_state_vector(r, t),\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
\n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (\n transform_translation(transform_mul(p1, p2))\n .reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)\n .mean(axis=0)\n )\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(\n node_names, pairwise_translation\n )\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(\n self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree\n ):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(\n list(joint_mapping_inv)\n )\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (\n len(set(n_joints)) == 1\n ), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(\n map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n )\n )\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(\n node_names, pairwise_translation\n )\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]\n )\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]\n )\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (\n source_state.root_translation - source_tpose.root_translation\n ) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state 
relative to source tpose and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[\n current_index, :\n ] = target_tpose.global_rotation[\n target_tpose.skeleton_tree.index(name), :\n ]\n\n global_rotation_diff = quat_mul_norm(\n source_state.global_rotation, quat_inverse(source_tpose.global_rotation)\n )\n new_global_rotation = quat_mul_norm(\n global_rotation_diff, target_tpose_global_rotation\n )\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[\n :, parent_index, :\n ]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (\n len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0\n ), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(\n self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps\n )\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(\n cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int\n ):\n \"\"\"\n Construct a skeleton motion from a 
skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n\n assert (\n type(skeleton_state) == SkeletonState\n ), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n\n global_velocity = SkeletonMotion._compute_velocity(\n p=skeleton_state.global_translation, time_delta=1 / fps\n )\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(\n r=skeleton_state.global_rotation, time_delta=1 / fps\n )\n\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(\n dict_repr[\"global_angular_velocity\"], *args, **kwargs\n )\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(\n dict_repr[\"skeleton_tree\"], *args, **kwargs\n ),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ]\n )\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(\n fbx_file_path, fbx_configs, root_joint, fps\n )\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(\n transformation_matrix=torch.from_numpy(\n np.swapaxes(np.array(transforms), -1, -2),\n ).float()\n )\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(\n -1, len(joint_parents), 3\n )[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree, r=local_rotation, t=root_translation, is_local=True\n )\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(\n skeleton_state=skeleton_state, fps=fps\n )\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode=\"nearest\"\n )\n / time_delta,\n )\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(\n r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])\n )\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n angular_velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"\n ),\n )\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\n \"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps)\n )\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "plot_skeleton_state", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_state(skeleton_state, task_name=\"\"):\n \"\"\"\n Visualize a skeleton state\n\n :param skeleton_state:\n :param task_name:\n :type skeleton_state: SkeletonState\n :type task_name: string, optional\n \"\"\"\n logger.info(\"plotting {}\".format(task_name))\n task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)\n plotter = Matplotlib3DPlotter(task)\n plotter.show()" }, { "identifier": "plot_skeleton_motion_interactive", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_motion_interactive(skeleton_motion, task_name=\"\"):\n \"\"\"\n Visualize a skeleton motion along its first dimension interactively.\n\n :param skeleton_motion:\n :param task_name:\n :type skeleton_motion: SkeletonMotion\n :type task_name: string, optional\n \"\"\"\n for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):\n pass" }, { "identifier": "Matplotlib3DPlotter", "path": "poselib/poselib/visualization/plt_plotter.py", "snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n 
_create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], 
dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)" }, { "identifier": "Draw3DSkeletonMotion", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonMotion(BasePlotterTask):\n def __init__(\n self,\n task_name: str,\n skeleton_motion,\n frame_index=None,\n joints_color=\"red\",\n lines_color=\"blue\",\n velocity_color=\"green\",\n angular_velocity_color=\"purple\",\n trail_color=\"black\",\n trail_length=10,\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonMotion\")\n self._trail_length = trail_length\n self._skeleton_motion = skeleton_motion\n # if frame_index is None:\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]\n # else:\n # curr_skeleton_motion = self._skeleton_motion[frame_index, :]\n self._skeleton_state_task = Draw3DSkeletonState(\n self.get_scoped_name(\"skeleton_state\"),\n curr_skeleton_motion,\n joints_color=joints_color,\n lines_color=lines_color,\n alpha=alpha,\n )\n vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(\n curr_skeleton_motion\n )\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(trail_length, axis=0)\n self._vel_task = Draw3DLines(\n self.get_scoped_name(\"velocity\"),\n vel_lines,\n velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._avel_task = Draw3DLines(\n self.get_scoped_name(\"angular_velocity\"),\n avel_lines,\n angular_velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._com_trail_task = Draw3DTrail(\n self.get_scoped_name(\"com_trail\"),\n self._com_pos,\n trail_color,\n marker_size=2,\n influence_lim=True,\n alpha=alpha,\n )\n\n @property\n def name(self):\n return \"3DSkeletonMotion\"\n\n def update(self, 
frame_index=None, reset_trail=False, skeleton_motion=None) -> None:\n if skeleton_motion is not None:\n self._skeleton_motion = skeleton_motion\n\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]\n if reset_trail:\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(self._trail_length, axis=0)\n else:\n self._com_pos = np.concatenate(\n (\n curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],\n self._com_pos[:-1],\n ),\n axis=0,\n )\n self._skeleton_state_task.update(curr_skeleton_motion)\n self._com_trail_task.update(self._com_pos)\n self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))\n\n @staticmethod\n def _get_vel_and_avel(skeleton_motion):\n \"\"\"Get all the velocity and angular velocity lines\n \"\"\"\n pos = skeleton_motion.global_translation.numpy()\n vel = skeleton_motion.global_velocity.numpy()\n avel = skeleton_motion.global_angular_velocity.numpy()\n\n vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)\n avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)\n return vel_lines, avel_lines\n\n def _update(self, vel_lines, avel_lines) -> None:\n self._vel_task.update(vel_lines)\n self._avel_task.update(avel_lines)\n\n def __iter__(self):\n yield from self._skeleton_state_task\n yield from self._vel_task\n yield from self._avel_task\n yield from self._com_trail_task" }, { "identifier": "Draw3DSkeletonState", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonState(BasePlotterTask):\n _lines_task: Draw3DLines # sub-task for drawing lines\n _dots_task: Draw3DDots # sub-task for drawing dots\n\n def __init__(\n self,\n task_name: str,\n skeleton_state,\n joints_color: str = \"red\",\n lines_color: str = \"blue\",\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonState\")\n lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)\n self._lines_task = Draw3DLines(\n self.get_scoped_name(\"bodies\"), lines, joints_color, alpha=alpha\n )\n self._dots_task = Draw3DDots(\n self.get_scoped_name(\"joints\"), dots, lines_color, alpha=alpha\n )\n\n @property\n def name(self):\n return \"3DSkeleton\"\n\n def update(self, skeleton_state) -> None:\n self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))\n\n @staticmethod\n def _get_lines_and_dots(skeleton_state):\n \"\"\"Get all the lines and dots needed to draw the skeleton state\n \"\"\"\n assert (\n len(skeleton_state.tensor.shape) == 1\n ), \"the state has to be zero dimensional\"\n dots = skeleton_state.global_translation.numpy()\n skeleton_tree = skeleton_state.skeleton_tree\n parent_indices = skeleton_tree.parent_indices.numpy()\n lines = []\n for node_index in range(len(skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index != -1:\n lines.append([dots[node_index], dots[parent_index]])\n lines = np.array(lines)\n return lines, dots\n\n def _update(self, lines, dots) -> None:\n self._lines_task.update(lines)\n self._dots_task.update(dots)\n\n def __iter__(self):\n yield from self._lines_task\n yield from self._dots_task" } ]
from ...core import *
from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from ...visualization.common import (
    plot_skeleton_state,
    plot_skeleton_motion_interactive,
)
from ...visualization.plt_plotter import Matplotlib3DPlotter
from ...visualization.skeleton_plotter_tasks import (
    Draw3DSkeletonMotion,
    Draw3DSkeletonState,
)

import numpy as np
import torch
18830
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.


def test_skel_tree():
    skel_tree = SkeletonTree.from_mjcf(
        "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
        backend="pytorch",
    )
    skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
    # assert skel_tree.to_str() == skel_tree_rec.to_str()
    print(skel_tree.node_names)
    print(skel_tree.local_translation)
    print(skel_tree.parent_indices)
    skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
    skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)


def test_skel_motion():
    skel_motion = SkeletonMotion.from_file(
        "/tmp/tmp.npy", backend="pytorch", load_context=True
    )
    plot_skeleton_motion_interactive(skel_motion)


def test_grad():
    source_motion = SkeletonMotion.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
        backend="pytorch",
        device="cuda:0",
    )
    source_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_skeleton_tree = target_tpose.skeleton_tree

    joint_mapping = {
        "upArm_r": "right_shoulder",
        "upArm_l": "left_shoulder",
        "loArm_r": "right_elbow",
        "loArm_l": "left_elbow",
        "upLeg_r": "right_hip",
        "upLeg_l": "left_hip",
        "loLeg_r": "right_knee",
        "loLeg_l": "left_knee",
        "foot_r": "right_ankle",
        "foot_l": "left_ankle",
        "hips": "pelvis",
        "neckA": "neck",
        "spineA": "abdomen",
    }

    rotation_to_target_skeleton = quat_from_angle_axis(
        angle=torch.tensor(90.0).float(),
        axis=torch.tensor([1, 0, 0]).float(),
        degree=True,
    )

    target_motion = source_motion.retarget_to(
        joint_mapping=joint_mapping,
        source_tpose_local_rotation=source_tpose.local_rotation,
        source_tpose_root_translation=source_tpose.root_translation,
        target_skeleton_tree=target_skeleton_tree,
        target_tpose_local_rotation=target_tpose.local_rotation,
        target_tpose_root_translation=target_tpose.root_translation,
        rotation_to_target_skeleton=rotation_to_target_skeleton,
        scale_to_target_skeleton=0.01,
    )

    target_state = SkeletonState(
        target_motion.tensor[800, :],
        target_motion.skeleton_tree,
        target_motion.is_local,
    )

    skeleton_tree = target_state.skeleton_tree
    root_translation = target_state.root_translation
    global_translation = target_state.global_translation

    q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
    q[..., 3] = 1.0
    q = torch.from_numpy(q)
    max_its = 10000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000
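Two conventions in the test code above are worth spelling out: the identity initialization q[..., 3] = 1.0 implies an xyzw quaternion layout, and rotation_to_target_skeleton is built as a 90 degree turn about the x axis. A quick NumPy check of what that quaternion does to the y axis, using the standard quaternion rotation identity instead of the library call (the helper below exists only for this check):

import numpy as np

def rotate_by_quat_xyzw(q, v):
    # v' = v + w * t + q_vec x t, with t = 2 * (q_vec x v)
    q_vec, w = q[:3], q[3]
    t = 2.0 * np.cross(q_vec, v)
    return v + w * t + np.cross(q_vec, t)

half = np.deg2rad(90.0) / 2.0
q_x90 = np.array([np.sin(half), 0.0, 0.0, np.cos(half)])  # 90 deg about x, xyzw order
print(rotate_by_quat_xyzw(q_x90, np.array([0.0, 1.0, 0.0])))  # approximately [0, 0, 1]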
next_line: task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
gold_snippet_index: 7
created_at: 2023-10-31 20:47:12+00:00
level: 24k
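Each record here pairs cross-file context and an in-file prefix with a single ground-truth continuation. Assuming the field names mean what they suggest (cropped_code is the in-file prefix, import_statement holds that file's imports, next_line is the line to predict, and gold_snippet_index presumably marks the most relevant context entry), a minimal way to consume one record could look like the sketch below; the prompt layout and exact-match metric are illustrative choices, not a prescribed harness:

def build_prompt(record: dict) -> str:
    # Cross-file snippets first, then the imports and the cropped in-file prefix.
    context_part = "\n\n".join(entry["snippet"] for entry in record["context"])
    return f"{context_part}\n\n{record['import_statement']}\n{record['cropped_code']}"

def next_line_exact_match(prediction: str, record: dict) -> bool:
    # Compare only the first non-empty predicted line against the gold next_line.
    predicted_lines = [ln for ln in prediction.splitlines() if ln.strip()]
    first = predicted_lines[0].strip() if predicted_lines else ""
    return first == record["next_line"].strip()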
repo_name: Improbable-AI/dexenv
file_path: dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n if self.randomize:\n logger.warning(f'Domain randomization is enabled!')\n self.randomization_params = self.cfg[\"task\"][\"randomization_params\"]\n self.aggregate_mode = self.cfg[\"env\"][\"aggregateMode\"]\n\n self.dist_reward_scale = self.cfg[\"env\"][\"rew\"][\"distRewardScale\"]\n self.rot_reward_scale = self.cfg[\"env\"][\"rew\"][\"rotRewardScale\"]\n self.success_tolerance = self.cfg[\"env\"][\"rew\"][\"successTolerance\"]\n self.reach_goal_bonus = self.cfg[\"env\"][\"rew\"][\"reachGoalBonus\"]\n self.fall_dist = self.cfg[\"env\"][\"rew\"][\"fallDistance\"]\n self.fall_penalty = self.cfg[\"env\"][\"rew\"][\"fallPenalty\"]\n self.rot_eps = self.cfg[\"env\"][\"rew\"][\"rotEps\"]\n\n self.vel_obs_scale = 0.2 # scale factor of velocity based observations\n self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations\n\n self.reset_position_noise = self.cfg[\"env\"][\"resetPositionNoise\"]\n self.reset_rotation_noise = self.cfg[\"env\"][\"resetRotationNoise\"]\n self.reset_dof_pos_noise = self.cfg[\"env\"][\"resetDofPosRandomInterval\"]\n self.reset_dof_vel_noise = self.cfg[\"env\"][\"resetDofVelRandomInterval\"]\n\n self.force_scale = self.cfg[\"env\"].get(\"forceScale\", 0.0)\n self.force_prob_range = self.cfg[\"env\"].get(\"forceProbRange\", [0.001, 0.1])\n self.force_decay = self.cfg[\"env\"].get(\"forceDecay\", 0.99)\n self.force_decay_interval = self.cfg[\"env\"].get(\"forceDecayInterval\", 0.08)\n\n self.dclaw_dof_speed_scale = self.cfg[\"env\"][\"dofSpeedScale\"]\n # self.act_moving_average = self.cfg[\"env\"][\"actionsMovingAverage\"]\n\n self.debug_viz = self.cfg[\"env\"][\"enableDebugVis\"]\n\n self.max_episode_length = self.cfg[\"env\"][\"episodeLength\"]\n self.reset_time = self.cfg[\"env\"].get(\"resetTime\", -1.0)\n self.print_success_stat = self.cfg[\"env\"][\"printNumSuccesses\"]\n self.max_consecutive_successes = self.cfg[\"env\"][\"maxConsecutiveSuccesses\"]\n self.av_factor = self.cfg[\"env\"].get(\"averFactor\", 0.1)\n\n self.object_type = self.cfg[\"env\"][\"objectType\"]\n\n self.asset_files_dict = {\n \"block\": \"urdf/objects/cube_multicolor.urdf\",\n \"egg\": \"mjcf/open_ai_assets/hand/egg.xml\",\n \"airplane\": \"single_objects/airplane/model.urdf\",\n 'power_drill': 'single_objects/power_drill/model.urdf',\n 'mug': 'single_objects/mug/model.urdf',\n 'elephant': 'asymm/train/elephant/var_000/model.urdf',\n 'train': 'asymm/train/train/var_000/model.urdf',\n 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf'\n\n }\n self.objs_in_isaacgym = ['block', 'egg']\n\n if \"asset\" in self.cfg[\"env\"]:\n self.asset_files_dict[\"block\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameBlock\",\n self.asset_files_dict[\"block\"])\n self.asset_files_dict[\"egg\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameEgg\",\n self.asset_files_dict[\"egg\"])\n\n self.obs_type = self.cfg[\"env\"][\"observationType\"]\n\n if not (self.obs_type in [\"full_no_vel\", \"full\", \"full_state\"]):\n raise Exception(\n \"Unknown type of observations!\\nobservationType should be one of: [openai, full_no_vel, full, full_state]\")\n\n print(\"Obs type:\", self.obs_type)\n\n ## TODO: change value here\n self.num_obs_dict = {\n \"full_no_vel\": 42,\n \"full\": 87,\n 
\"full_state\": 114\n }\n\n self.up_axis = 'z'\n\n num_states = 0\n\n self.cfg[\"env\"][\"numObservations\"] = self.num_obs_dict[self.obs_type]\n self.cfg[\"env\"][\"numStates\"] = num_states\n self.cfg[\"env\"][\"numActions\"] = 12\n self.hist_buf_reset_env_ids = None\n\n super().__init__(config=self.cfg,\n sim_device=sim_device,\n rl_device=rl_device,\n graphics_device_id=graphics_device_id,\n headless=headless)\n\n self.dt = self.sim_params.dt\n control_freq_inv = self.cfg[\"env\"].get(\"controlFrequencyInv\", 1)\n if self.reset_time > 0.0:\n self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))\n print(\"Reset time: \", self.reset_time)\n print(\"New episode length: \", self.max_episode_length)\n\n if self.viewer != None:\n cam_pos = gymapi.Vec3(0.16, -0.5, 0.5)\n cam_target = gymapi.Vec3(0.0, 0.0, 0.15)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n\n actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)\n dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)\n rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)\n self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)\n\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,\n self.num_dclaw_dofs)\n\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)\n self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs]\n self.dclaw_dof_pos = self.dclaw_dof_state[..., 0]\n self.dclaw_dof_vel = self.dclaw_dof_state[..., 1]\n if self.cfg.env.dof_torque_on:\n self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1)\n else:\n self.dclaw_dof_torque = None\n\n self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)\n self.num_bodies = self.rigid_body_states.shape[1]\n\n self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)\n\n if self.cfg.env.rew.pen_tb_contact:\n _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim)\n self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3)\n table_handle = self.gym.find_actor_handle(self.envs[0], 'table')\n self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0],\n table_handle,\n 'table',\n gymapi.DOMAIN_ENV)\n logger.warning(f'Table body index:{self.table_body_index}')\n self.table_contact_force = self.net_contact_force[:, self.table_body_index]\n\n self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs\n self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n\n self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)\n\n self.reset_goal_buf = self.reset_buf.clone()\n self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)\n 
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)\n\n self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)\n\n self.total_successes = 0\n self.total_resets = 0\n\n self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)\n self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)\n self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(self.num_envs, device=self.device) + torch.log(\n self.force_prob_range[1]))\n\n self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)\n\n self.num_actions = self.num_dclaw_dofs\n self.actions = self.zero_actions()\n DClawBase.compute_observations(self)\n self.num_observations = self.obs_buf.shape[-1]\n self.cfg.env.numObservations = self.num_observations\n self.create_ob_act_space()\n\n def create_sim(self):\n self.dt = self.cfg[\"sim\"][\"dt\"]\n self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis)\n\n self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)\n self._create_ground_plane()\n self._create_envs(self.num_envs, self.cfg[\"env\"]['envSpacing'], int(np.sqrt(self.num_envs)))\n\n if self.randomize:\n self.apply_randomizations(self.randomization_params)\n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n plane_params.distance = 0.1\n self.gym.add_ground(self.sim, plane_params)\n\n def _create_envs(self, num_envs, spacing, num_per_row):\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()\n object_asset_file = self.asset_files_dict[self.object_type]\n\n dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)\n table_asset = self.get_table_asset()\n table_pose = self.get_table_pose()\n\n if self.obs_type == \"full_state\":\n sensor_pose = gymapi.Transform()\n for ft_handle in self.fingertip_handles:\n self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)\n\n if self.object_type in self.objs_in_isaacgym:\n asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix()\n else:\n asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix()\n\n object_asset_options = gymapi.AssetOptions()\n if self.cfg.env.vhacd:\n object_asset_options.convex_decomposition_from_submeshes = True\n\n object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n object_asset_options.disable_gravity = True\n goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n dclaw_start_pose = self.get_dclaw_start_pose()\n object_start_pose = self.get_object_start_pose(dclaw_start_pose)\n\n goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)\n\n self.dclaws = []\n self.envs = []\n\n self.object_init_state = []\n self.hand_start_states = []\n\n self.hand_indices = []\n self.fingertip_indices = []\n self.object_indices = []\n self.goal_object_indices = []\n\n self.render_camera_handles = []\n if self.cfg.rgb_render:\n render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n 
print(f'Fingertip handles:{self.fingertip_handles}')\n\n dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)\n object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)\n object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)\n self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))\n self.object_handles = []\n\n max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1\n max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1\n\n for i in range(self.num_envs):\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n\n if self.aggregate_mode >= 1:\n self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)\n\n self.create_hand_actor(env_ptr=env_ptr,\n dclaw_asset=dclaw_asset,\n dclaw_start_pose=dclaw_start_pose,\n dclaw_dof_props=dclaw_dof_props,\n env_id=i)\n\n object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, \"object\", i, 0, 1)\n self.object_handles.append(object_handle)\n self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,\n object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,\n object_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)\n self.object_indices.append(object_idx)\n\n goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, \"goal_object\", i + self.num_envs,\n 0, 2)\n goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)\n self.goal_object_indices.append(goal_object_idx)\n\n if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':\n blockscale = float(self.cfg.env.blockscale)\n self.gym.set_actor_scale(env_ptr, object_handle, blockscale)\n self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)\n\n if self.object_type != \"block\":\n self.gym.set_rigid_body_color(\n env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n self.gym.set_rigid_body_color(\n env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, \"table\", i, 0)\n\n if self.cfg.rgb_render:\n render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)\n self.render_camera_handles.append(render_camera_handle[0])\n\n if self.aggregate_mode > 0:\n self.gym.end_aggregate(env_ptr)\n\n self.envs.append(env_ptr)\n\n self.setup_torch_states()\n\n def create_camera(self, camera_poses, env_ptr, camera_params):\n cam_handles = []\n for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):\n camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)\n if isinstance(camera_poses[ic], tuple):\n self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])\n else:\n self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])\n cam_handles.append(camera_handle)\n return cam_handles\n\n def get_visual_render_camera_setup(self):\n cam_pos = np.array([-0.7, 0, 0.5])\n cam_focus_pt = np.array([0.08, 0, 0.15])\n cam_focus_pt = gymapi.Vec3(*cam_focus_pt)\n cam_pos = gymapi.Vec3(*cam_pos)\n camera_poses = [(cam_pos, cam_focus_pt)]\n camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,\n height=self.cfg.cam.visual_render_height,\n hov=45,\n cuda=False)\n return camera_poses, camera_params\n\n def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, 
dclaw_dof_props, env_id):\n dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, \"hand\", env_id, 0, 0)\n if self.cfg.env.dof_torque_on:\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.hand_start_states.append(\n [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,\n dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,\n dclaw_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)\n hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)\n self.hand_indices.append(hand_idx)\n\n self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)\n if self.obs_type == \"full_state\":\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.dclaws.append(dclaw_actor)\n self.set_hand_color(env_ptr, dclaw_actor)\n\n def set_hand_color(self, env_ptr, dclaw_actor):\n rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)\n for bd, bd_id in rgd_dict.items():\n if bd not in dclaw_body_color_mapping:\n continue\n color = gymapi.Vec3(*dclaw_body_color_mapping[bd])\n self.gym.set_rigid_body_color(env_ptr, dclaw_actor,\n bd_id, gymapi.MESH_VISUAL,\n color)\n\n def get_table_asset(self):\n asset_options = gymapi.AssetOptions()\n asset_options.armature = 0.001\n asset_options.fix_base_link = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n table_dims = gymapi.Vec3(0.6, 0.6, 0.1)\n table_asset = self.gym.create_box(self.sim,\n table_dims.x,\n table_dims.y,\n table_dims.z,\n asset_options)\n table_props = self.gym.get_asset_rigid_shape_properties(table_asset)\n for p in table_props:\n p.friction = self.cfg.env.table.friction\n p.torsion_friction = self.cfg.env.table.torsion_friction\n p.restitution = self.cfg.env.table.restitution\n p.rolling_friction = self.cfg.env.table.rolling_friction\n self.gym.set_asset_rigid_shape_properties(table_asset, table_props)\n return table_asset\n\n def get_table_pose(self):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n object_start_pose.p.x = 0\n object_start_pose.p.y = 0\n object_start_pose.p.z = -0.05\n return object_start_pose\n\n def get_dclaw_start_pose(self):\n dclaw_start_pose = gymapi.Transform()\n dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx))\n dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi)\n return dclaw_start_pose\n\n def setup_torch_states(self):\n self.render_rgb_obs_buf = None\n if self.cfg.rgb_render:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0))\n else:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0))\n self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(\n self.num_envs, 13)\n self.goal_states = self.object_init_state.clone()\n self.goal_states[:, self.up_axis_idx] -= 0.04\n self.goal_init_state = self.goal_states.clone()\n self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)\n\n self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device)\n self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)\n self.object_rb_masses = None\n self.update_obj_mass()\n self.hand_indices = to_torch(self.hand_indices, 
dtype=torch.long, device=self.device)\n self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)\n self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)\n\n def get_dclaw_asset(self, asset_root=None, asset_options=None):\n # load dclaw asset\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.flip_visual_attachments = False\n asset_options.fix_base_link = True\n asset_options.collapse_fixed_joints = False\n asset_options.disable_gravity = False\n asset_options.thickness = 0.001\n asset_options.angular_damping = 0.01\n asset_options.override_inertia = True\n asset_options.override_com = True\n logger.info(f'VHACD:{self.cfg.env.vhacd}')\n if self.cfg.env.vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n if self.cfg.physics_engine == \"physx\":\n # if self.physics_engine == gymapi.SIM_PHYSX:\n asset_options.use_physx_armature = True\n asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS\n\n if asset_root is None:\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix()\n robot_name = self.cfg.env.robot\n asset_root = pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix()\n dclaw_asset = self.gym.load_asset(self.sim, asset_root, f\"{robot_name}.urdf\", asset_options)\n print(f'Dclaw asset root:{asset_root} robot name:{robot_name}')\n\n self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset)\n self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset)\n self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset)\n\n print(f'D-Claw:')\n print(f'\\t Number of bodies: {self.num_dclaw_bodies}')\n print(f'\\t Number of shapes: {self.num_dclaw_shapes}')\n print(f'\\t Number of dofs: {self.num_dclaw_dofs}')\n\n self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset)\n joint_names = self.dclaw_asset_dof_dict.keys()\n logger.info(f'Joint names:{joint_names}')\n\n self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values())\n dinds = np.array(self.dof_joint_indices)\n assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending)\n\n rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset)\n self.fingertips = [x for x in rb_links if 'tip_link' in x] # [\"one_tip_link\", \"two_tip_link\", \"three_tip_link\"]\n self.num_fingertips = len(self.fingertips)\n\n print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}')\n\n print(f'Actuator --- DoF Index')\n for act_name, act_index in zip(joint_names, self.dof_joint_indices):\n print(f'\\t {act_name} {act_index}')\n\n dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset)\n\n def set_dof_prop(props, prop_name, val):\n if np.isscalar(val):\n props[prop_name].fill(val)\n elif len(val) == 3:\n props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3))\n else:\n props[prop_name] = np.array(val)\n\n if self.cfg[\"env\"][\"dof_vel_hard_limit\"] is not None:\n vel_hard_limit = self.cfg[\"env\"][\"dof_vel_hard_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_dof_vel_hard_limit\"]\n print(f'Setting DOF velocity limit to:{vel_hard_limit}')\n set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit)\n if self.cfg[\"env\"][\"effort_limit\"] is not None:\n effort_limit = self.cfg[\"env\"][\"effort_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_effort_limit\"]\n print(f'Setting DOF effort limit to:{effort_limit}')\n set_dof_prop(dclaw_dof_props, 
'effort', effort_limit)\n if self.cfg[\"env\"][\"stiffness\"] is not None:\n stiffness = self.cfg[\"env\"][\"stiffness\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_stiffness\"]\n print(f'Setting stiffness to:{stiffness}')\n set_dof_prop(dclaw_dof_props, 'stiffness', stiffness)\n if self.cfg[\"env\"][\"damping\"] is not None:\n damping = self.cfg[\"env\"][\"damping\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_damping\"]\n print(f'Setting damping to:{damping}')\n set_dof_prop(dclaw_dof_props, 'damping', damping)\n\n self.dclaw_dof_lower_limits = []\n self.dclaw_dof_upper_limits = []\n\n self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype)\n self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos']\n self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel']\n for i in range(self.num_dclaw_dofs):\n self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i])\n self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i])\n if i % 3 == 1:\n self.dclaw_default_dof_pos[i] = 0.8\n elif i % 3 == 2:\n self.dclaw_default_dof_pos[i] = -1.1\n else:\n self.dclaw_default_dof_pos[i] = 0.\n self.dclaw_default_dof_vel[i] = 0.0\n\n self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device)\n self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device)\n self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device)\n self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device)\n self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device)\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n\n dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset)\n for p in dclaw_asset_props:\n p.friction = self.cfg.env.hand.friction\n p.torsion_friction = self.cfg.env.hand.torsion_friction\n p.rolling_friction = self.cfg.env.hand.rolling_friction\n p.restitution = self.cfg.env.hand.restitution\n self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props)\n return dclaw_asset, dclaw_dof_props\n\n def get_object_start_pose(self, dclaw_start_pose):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n if self.cfg.env.obj_init_delta_pos is not None:\n delta_pos = self.cfg.env.obj_init_delta_pos\n object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0]\n object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1]\n object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2]\n else:\n object_start_pose.p.x = dclaw_start_pose.p.x\n pose_dy, pose_dz = 0., -0.13\n object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy\n object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz\n return object_start_pose\n\n def get_goal_object_start_pose(self, object_start_pose):\n self.goal_displacement = gymapi.Vec3(0., 0, 0.25)\n self.goal_displacement_tensor = to_torch(\n [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)\n goal_start_pose = gymapi.Transform()\n goal_start_pose.p = object_start_pose.p + self.goal_displacement\n return goal_start_pose\n\n def set_dof_props(self, props_dict):\n param_setters_map = get_property_setter_map(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n prop_name = 'dof_properties'\n setter = param_setters_map[prop_name]\n for env_id in range(len(self.envs)):\n env = self.envs[env_id]\n 
handle = self.gym.find_actor_handle(env, 'hand')\n prop = param_getters_map[prop_name](env, handle)\n for dof_prop_name, dof_prop_values in props_dict.items():\n if env_id == 0:\n assert len(dof_prop_values) == len(self.envs)\n prop_val = dof_prop_values[env_id]\n prop[dof_prop_name].fill(prop_val)\n success = setter(env, handle, prop)\n if not success:\n logger.warning(f'Setting dof properties is not successful!')\n\n def update_obj_mass(self, env_ids=None):\n object_rb_masses = []\n env_pool = env_ids if env_ids is not None else list(range(self.num_envs))\n if len(env_pool) < 1:\n return\n for env_id, object_handle in zip(env_pool, self.object_handles):\n env_ptr = self.envs[env_id]\n object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)\n object_rb_masses.append([prop.mass for prop in object_rb_props])\n if self.object_rb_masses is None:\n self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n else:\n self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n self.reset_buf.fill_(1)\n self.reset_goal_buf.fill_(1)\n if self.cfg.env.action_ema is not None:\n self.action_ema_val = zero_actions.clone()\n # step the simulator\n\n self.step(zero_actions)\n\n return self.update_obs()\n\n def compute_reward(self, actions):\n res = compute_dclaw_reward(\n self.reset_buf, self.reset_goal_buf, self.progress_buf,\n self.successes, self.max_episode_length,\n self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,\n self.cfg['env']['rew'], self.actions,\n self.fingertip_pos, self.fingertip_vel, self.object_linvel, self.object_angvel,\n self.dclaw_dof_vel, self.dclaw_dof_torque,\n table_cf=self.table_contact_force if self.cfg.env.rew.pen_tb_contact else None\n )\n self.rew_buf[:] = res[0] * self.cfg.env.rew.rew_scale\n self.done_buf[:] = res[1]\n self.reset_buf[:] = res[2]\n self.reset_goal_buf[:] = res[3]\n self.progress_buf[:] = res[4]\n self.successes[:] = res[5]\n abs_rot_dist = res[6]\n reward_terms = res[7]\n timeout_envs = res[8]\n\n self.extras['success'] = self.reset_goal_buf.detach().to(self.rl_device).flatten()\n self.extras['abs_dist'] = abs_rot_dist.detach().to(self.rl_device)\n self.extras['TimeLimit.truncated'] = timeout_envs.detach().to(self.rl_device)\n for reward_key, reward_val in reward_terms.items():\n self.extras[reward_key] = reward_val.detach()\n\n def get_images(self):\n rgb = self.render_rgb_obs_buf\n return rgb\n\n def compute_observations(self):\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n self.gym.refresh_force_sensor_tensor(self.sim)\n self.gym.refresh_dof_force_tensor(self.sim)\n\n if self.cfg.env.rew.pen_tb_contact:\n self.gym.refresh_net_contact_force_tensor(self.sim)\n\n self.object_pose = self.root_state_tensor[self.object_indices, 0:7]\n self.object_pos = self.root_state_tensor[self.object_indices, 0:3]\n self.object_rot = self.root_state_tensor[self.object_indices, 3:7]\n self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]\n self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]\n\n self.goal_pose = self.goal_states[:, 0:7]\n self.goal_pos = 
self.goal_states[:, 0:3]\n self.goal_rot = self.goal_states[:, 3:7]\n\n self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]\n self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]\n self.fingertip_vel = self.rigid_body_states[:, self.fingertip_handles][:, :, 7:13]\n\n if self.obs_type == \"full_no_vel\":\n obs_buf = self.compute_full_observations(no_vel=True)\n elif self.obs_type == \"full\":\n obs_buf = self.compute_full_observations()\n elif self.obs_type == \"full_state\":\n obs_buf = self.compute_full_state()\n else:\n print(\"Unkown observations type!\")\n self.obs_buf = obs_buf\n\n if self.cfg.rgb_render:\n self.gym.fetch_results(self.sim, True)\n self.gym.step_graphics(self.sim)\n self.gym.render_all_camera_sensors(self.sim)\n self.gym.start_access_image_tensors(self.sim)\n self.render_rgb_obs_buf = self.get_numpy_rgb_images(self.render_camera_handles)\n self.gym.end_access_image_tensors(self.sim)\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n\n def compute_full_observations(self, no_vel=False):\n scaled_dof_pos = unscale(\n self.dclaw_dof_pos,\n self.dclaw_dof_lower_limits,\n self.dclaw_dof_upper_limits\n )\n quat_dist = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))\n\n if no_vel:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.object_pose,\n self.goal_rot,\n quat_dist,\n self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n else:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.vel_obs_scale * self.dclaw_dof_vel,\n self.object_pose,\n self.object_linvel,\n self.vel_obs_scale * self.object_angvel,\n self.goal_rot,\n quat_dist,\n self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n return out\n\n def compute_full_state(self):\n obs_buf = self.compute_full_observations()\n obs_no_actions = obs_buf[:, :-9]\n actions = obs_buf[:, -9:]\n out = torch.cat(\n [\n obs_no_actions,\n self.force_torque_obs_scale * self.dof_force_tensor,\n self.force_torque_obs_scale * self.vec_sensor_tensor,\n actions\n ],\n dim=-1\n )\n\n return out\n\n def update_obs(self):\n if self.randomize:\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def reset_target_pose(self, env_ids, apply_reset=False):\n new_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]\n self.goal_states[env_ids, 3:7] = new_rot\n self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor\n self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])\n\n if apply_reset:\n goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))\n self.reset_goal_buf[env_ids] = 0\n\n def reset_idx(self, env_ids, goal_env_ids):\n if 
self.randomize and not self.cfg.env.rand_once:\n self.apply_randomizations(self.randomization_params)\n\n rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_dclaw_dofs * 2 + 3), device=self.device)\n\n self.reset_target_pose(env_ids)\n self.rb_forces[env_ids, :, :] = 0.0\n\n self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()\n self.root_state_tensor[self.object_indices[env_ids], 0:3] = self.object_init_state[env_ids, 0:3] + \\\n self.reset_position_noise * rand_floats[:, 0:3]\n\n new_object_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot\n self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.object_indices[env_ids], 7:13])\n\n object_indices = torch.unique(torch.cat([self.object_indices[env_ids],\n self.goal_object_indices[env_ids],\n self.goal_object_indices[goal_env_ids]]).to(torch.int32))\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(object_indices), len(object_indices))\n self.random_force_prob[env_ids] = torch.exp(\n (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))\n\n delta_max = self.dclaw_dof_upper_limits - self.dclaw_default_dof_pos\n delta_min = self.dclaw_dof_lower_limits - self.dclaw_default_dof_pos\n rand_delta = delta_min + (delta_max - delta_min) * rand_floats[:, 3:3 + self.num_dclaw_dofs]\n\n pos = self.dclaw_default_dof_pos + self.reset_dof_pos_noise * rand_delta\n self.dclaw_dof_pos[env_ids, :] = pos\n self.dclaw_dof_vel[env_ids, :] = self.dclaw_default_dof_vel + \\\n self.reset_dof_vel_noise * rand_floats[:,\n 3 + self.num_dclaw_dofs:3 + self.num_dclaw_dofs * 2]\n self.prev_targets[env_ids, :self.num_dclaw_dofs] = pos\n self.cur_targets[env_ids, :self.num_dclaw_dofs] = pos\n\n hand_indices = self.hand_indices[env_ids].to(torch.int32)\n self.gym.set_dof_position_target_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.prev_targets),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n\n self.progress_buf[env_ids] = 0\n self.reset_buf[env_ids] = 0\n self.successes[env_ids] = 0\n\n def get_numpy_rgb_images(self, camera_handles):\n rgb_obs_buf = []\n for cam_handles, env in zip(camera_handles, self.envs):\n cam_ob = []\n if isinstance(cam_handles, list):\n for cam_handle in cam_handles:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handle, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n cam_ob.append(color_image)\n rgb_obs_buf.append(cam_ob)\n else:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handles, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n rgb_obs_buf.append(color_image)\n rgb_obs_buf = np.stack(rgb_obs_buf)\n return rgb_obs_buf\n\n def pre_physics_step(self, actions):\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)\n\n if len(goal_env_ids) > 0 and len(env_ids) == 0:\n self.reset_target_pose(goal_env_ids, apply_reset=True)\n elif len(goal_env_ids) > 0:\n 
self.reset_target_pose(goal_env_ids)\n\n if len(env_ids) > 0:\n self.reset_idx(env_ids, goal_env_ids)\n\n self.actions = actions.clone().to(self.device)\n\n if self.cfg.env.action_ema is not None:\n self.action_ema_val[env_ids] = 0\n self.action_ema_val[goal_env_ids] = 0\n self.actions = self.actions * self.cfg.env.action_ema + self.action_ema_val * (1 - self.cfg.env.action_ema)\n self.action_ema_val = self.actions.clone()\n if self.cfg.env.dof_vel_pol_limit is not None:\n delta_action = self.actions * self.cfg.env.dof_vel_pol_limit * (self.dt * self.cfg.env.controlFrequencyInv)\n else:\n delta_action = self.dclaw_dof_speed_scale * self.dt * self.actions\n if self.cfg.env.relativeToPrevTarget:\n targets = self.prev_targets[:, self.dof_joint_indices] + delta_action\n else:\n targets = self.dclaw_dof_pos + delta_action\n\n self.cur_targets[:, self.dof_joint_indices] = tensor_clamp(targets,\n self.dclaw_dof_lower_limits[\n self.dof_joint_indices],\n self.dclaw_dof_upper_limits[\n self.dof_joint_indices])\n\n self.prev_targets[:, self.dof_joint_indices] = self.cur_targets[:, self.dof_joint_indices]\n self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))\n\n if self.force_scale > 0.0:\n self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)\n # apply new forces\n force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()\n rb_force_shape = self.rb_forces[force_indices, self.object_rb_handles, :].shape\n rb_force_dir = torch.randn(rb_force_shape, device=self.device)\n rb_force_dir = rb_force_dir / rb_force_dir.norm(dim=-1, keepdim=True)\n self.rb_forces[force_indices, self.object_rb_handles, :] = rb_force_dir * self.object_rb_masses[force_indices] * self.force_scale\n self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None,\n gymapi.LOCAL_SPACE)\n\n def post_physics_step(self):\n self.progress_buf += 1\n self.randomize_buf += 1\n\n self.compute_observations()\n self.compute_reward(self.actions)\n\n if self.viewer and self.debug_viz:\n # draw axes on target object\n self.gym.clear_lines(self.viewer)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n for i in range(self.num_envs):\n targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])\n\n objectx = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n objecty = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n objectz = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.object_pos[i].cpu().numpy()\n 
self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])" }, { "identifier": "chunker_list", "path": "dexenv/utils/common.py", "snippet": "def chunker_list(seq_list, nchunks):\n # split the list into n parts/chunks\n return [seq_list[i::nchunks] for i in range(nchunks)]" }, { "identifier": "get_all_files_with_name", "path": "dexenv/utils/common.py", "snippet": "def get_all_files_with_name(directory, name,\n exclude_patterns=None,\n include_patterns=None,\n sort=True,\n ):\n directory = pathlib_file(directory)\n files = directory.glob(f'**/{name}')\n files = [x for x in files if x.is_file() and x.name == name]\n if exclude_patterns is not None:\n files = filter_with_exclude_patterns(files, exclude_patterns)\n if include_patterns is not None:\n files = filter_with_include_patterns(files, include_patterns)\n if sort:\n files = sorted(files)\n return files" }, { "identifier": "load_from_pickle", "path": "dexenv/utils/common.py", "snippet": "def load_from_pickle(file_name):\n file_name = pathlib_file(file_name)\n with file_name.open('rb') as f:\n data = pkl.load(f)\n return data" }, { "identifier": "load_a_goal_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_a_goal_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n asset_options.override_inertia = True\n # asset_options.override_com = True\n\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_an_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_an_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.thickness = 0.001\n asset_options.override_inertia = True\n # asset_options.override_com = True\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_obj_texture", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_obj_texture(gym, sim, object_urdf):\n texture_files = get_all_files_with_suffix(object_urdf.parent, 'png')\n num_textures = len(texture_files)\n if num_textures > 1:\n logger.warning(f'Multiple image files exist, will use the first image as the texture!')\n elif num_textures == 0:\n raise RuntimeError(f'No texture file is found!')\n texture_file = texture_files[0]\n texture_handle = gym.create_texture_from_file(sim,\n texture_file.as_posix(),\n )\n return texture_handle" } ]
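One detail of the utilities quoted above that the docstring does not spell out: chunker_list splits with a stride (seq_list[i::nchunks]), so the resulting chunks are interleaved rather than contiguous. A small standalone check of that behaviour, with the one-liner restated inline so it runs on its own:

def chunker_list(seq_list, nchunks):
    # Same one-liner as in dexenv.utils.common: stride-based, interleaved split.
    return [seq_list[i::nchunks] for i in range(nchunks)]

print(chunker_list(list(range(7)), 3))
# [[0, 3, 6], [1, 4], [2, 5]]  (interleaved, not [[0, 1, 2], [3, 4], [5, 6]])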
import_statement:

import numpy as np
import torch
import dexenv
from gym.utils import seeding
from isaacgym import gymapi
from loguru import logger
from tqdm import tqdm
from dexenv.envs.dclaw_base import DClawBase
from dexenv.utils.common import chunker_list
from dexenv.utils.common import get_all_files_with_name
from dexenv.utils.common import load_from_pickle
from dexenv.utils.isaac_utils import load_a_goal_object_asset
from dexenv.utils.isaac_utils import load_an_object_asset
from dexenv.utils.isaac_utils import load_obj_texture
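Elsewhere in the DClawBase snippet, the per-env probability of applying a random perturbation force is drawn as exp((log(lo) - log(hi)) * u + log(hi)) with u sampled uniformly from [0, 1), which samples log-uniformly between the two ends of forceProbRange. A standalone sketch of that sampling rule, using the default range [0.001, 0.1] quoted in the snippet:

import torch

def sample_log_uniform(lo: float, hi: float, n: int) -> torch.Tensor:
    # u = 0 gives hi, u -> 1 gives lo; values in between are uniform in log space.
    lo_t, hi_t = torch.tensor(lo), torch.tensor(hi)
    u = torch.rand(n)
    return torch.exp((torch.log(lo_t) - torch.log(hi_t)) * u + torch.log(hi_t))

probs = sample_log_uniform(0.001, 0.1, 1000)
print(float(probs.min()), float(probs.max()))  # both within [0.001, 0.1] up to float rounding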
token_num: 15,583
0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = 
min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset) goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False) ptd = None if self.cfg.env.loadCADPTD: ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl') if ptd_file.exists():
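parse_obj_dataset above accepts the obj.dataset string in two forms: a bare dataset name resolves to assets/<dataset>/train, while a name:object form narrows the search to one object folder. A small pathlib sketch of that convention (the dataset and object names below are made up for illustration):

from pathlib import Path

def resolve_dataset_path(asset_root: Path, dataset: str) -> Path:
    parts = dataset.split(':')
    if len(parts) == 1:
        return asset_root / dataset / 'train'           # e.g. 'my_objects' (illustrative)
    return asset_root / parts[0] / 'train' / parts[1]   # e.g. 'my_objects:mug' (illustrative)

root = Path('assets')
print(resolve_dataset_path(root, 'my_objects'))        # assets/my_objects/train
print(resolve_dataset_path(root, 'my_objects:mug'))    # assets/my_objects/train/mug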
class DclawMultiObjs(DClawBase):
    def __init__(self, cfg, sim_device, rl_device, graphics_device_id):
        self.set_random_gen()
        self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset)
        self.num_objects = len(self.object_urdfs)
        logger.info(f'Object urdf root path:{self.dataset_path}.')
        logger.info(f'Number of available objects:{self.num_objects}.')
        super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id)

    def set_random_gen(self, seed=12345):
        self.np_random, seed = seeding.np_random(seed)

    def _create_envs(self, num_envs, spacing, num_per_row):
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()
        dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)

        # load manipulated object and goal assets
        table_asset = self.get_table_asset()
        table_pose = self.get_table_pose()
        object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset()

        # create fingertip force sensors, if needed
        if self.obs_type == "full_state":
            sensor_pose = gymapi.Transform()
            for ft_handle in self.fingertip_handles:
                self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)

        dclaw_start_pose = self.get_dclaw_start_pose()
        object_start_pose = self.get_object_start_pose(dclaw_start_pose)
        goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)

        self.dclaws = []
        self.envs = []
        self.object_init_state = []
        self.hand_start_states = []
        self.hand_indices = []
        self.fingertip_indices = []
        self.object_indices = []
        self.object_cat_indices = []
        self.goal_object_indices = []
        self.render_camera_handles = []
        if self.cfg.rgb_render:
            render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()

        self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips]

        dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)
        object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0])
        self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))
        self.object_handles = []
        num_object_assets = len(object_assets)
        env_obj_ids = []
        for i in range(self.num_envs):
            # create env instance
            obj_asset_id = i % num_object_assets
            env_obj_ids.append(object_ids[obj_asset_id])
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            if self.aggregate_mode >= 1:
                # compute aggregate size
                obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id])
                obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id])
                max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1
                max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            self.create_hand_actor(env_ptr=env_ptr,
                                   dclaw_asset=dclaw_asset,
                                   dclaw_start_pose=dclaw_start_pose,
                                   dclaw_dof_props=dclaw_dof_props,
                                   env_id=i)

            # add object
            object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1)
            self.object_handles.append(object_handle)
            self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
                                           object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,
                                           object_start_pose.r.w,
                                           0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            self.object_cat_indices.append(object_cat_ids[obj_asset_id])

            # add goal object
            goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)

            if self.cfg.obj.load_texture:
                self.gym.set_rigid_body_texture(env_ptr, object_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id])
                self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id])
            else:
                color = np.array([179, 193, 134]) / 255.0
                self.gym.set_rigid_body_color(
                    env_ptr, object_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))
                self.gym.set_rigid_body_color(
                    env_ptr, goal_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))

            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL,
                                          gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))

            if self.cfg.rgb_render:
                render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
                self.render_camera_handles.append(render_camera_handle[0])

            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)

            self.envs.append(env_ptr)

        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.setup_torch_states()
        self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)
        self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)

    def parse_obj_dataset(self, dataset):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        split_dataset_name = dataset.split(':')
        if len(split_dataset_name) == 1:
            dataset_path = asset_root.joinpath(dataset, 'train')
        else:
            target_object = split_dataset_name[1]
            dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)
        logger.warning(f'Dataset path:{dataset_path}')
        urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
        permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))
        permuted_urdfs = [urdf_files[i] for i in permute_ids]
        object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))
        obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}
        return permuted_urdfs, dataset_path, obj_name_to_id

    def get_object_category(self, urdf_path):
        cat = urdf_path.parents[0].name
        if 'var_' in cat:
            cat = urdf_path.parents[1].name
        return cat

    def load_object_asset(self):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        object_urdfs = self.object_urdfs

        object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []
        object_cat_ids = []
        if self.cfg.obj.object_id is not None:
            urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]
            logger.info(f'Loading a single object: {urdf_to_load}')
            obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
            object_assets.append(obj_asset)
            goal_assets.append(goal_asset)
            object_ids.append(self.object_urdfs.index(urdf_to_load))
            object_tex_handles.append(texture_handle)
            object_ptds.append(ptd)
            object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        else:
            if self.cfg.obj.start_id is None:
                start = 0
                end = min(len(object_urdfs), self.cfg.obj.num_objs)
            else:
                start = self.cfg.obj.start_id
                end = min(start + self.cfg.obj.num_objs, len(object_urdfs))
            iters = range(start, end)
            logger.info(f'Loading object IDs from {start} to {end}.')
            for idx in tqdm(iters, desc='Loading Asset'):
                urdf_to_load = object_urdfs[idx]
                obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
                object_assets.append(obj_asset)
                goal_assets.append(goal_asset)
                object_ids.append(self.object_urdfs.index(urdf_to_load))
                object_tex_handles.append(texture_handle)
                object_ptds.append(ptd)
                object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids

    def load_an_object(self, asset_root, object_urdf):
        out = []
        obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)
        obj_asset = self.change_obj_asset_dyn(obj_asset)
        goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)
        ptd = None
        if self.cfg.env.loadCADPTD:
            ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl')
            if ptd_file.exists():
ptd = load_from_pickle(ptd_file)
3
2023-10-25 17:22:41+00:00
24k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then provides the parameters necessary to reset any dead neurons.\n\n Motivation:\n Over the course of training, a subset of autoencoder neurons will have zero activity across\n a large number of datapoints. The authors of *Towards Monosemanticity: Decomposing Language\n Models With Dictionary Learning* found that “resampling” these dead neurons during training\n improves the number of likely-interpretable features (i.e., those in the high density\n cluster) and reduces total loss. This resampling may be compatible with the Lottery Ticket\n Hypothesis and increase the number of chances the network has to find promising feature\n directions.\n\n An interesting nuance around dead neurons involves the ultralow density cluster. They found\n that if we increase the number of training steps then networks will kill off more of these\n ultralow density neurons. This reinforces the use of the high density cluster as a useful\n metric because there can exist neurons that are de facto dead but will not appear to be when\n looking at the number of dead neurons alone.\n\n This approach is designed to seed new features to fit inputs where the current autoencoder\n performs worst. Resetting the encoder norm and bias are crucial to ensuring this resampled\n neuron will only fire weakly for inputs similar to the one used for its reinitialization.\n This was done to minimize interference with the rest of the network.\n\n Warning:\n The optimizer should be reset after applying this function, as the Adam state will be\n incorrect for the modified weights and biases.\n\n Warning:\n This approach is also known to create sudden loss spikes, and resampling too frequently\n causes training to diverge.\n \"\"\"\n\n _activations_seen_since_last_resample: int = 0\n \"\"\"Number of activations since we last resampled.\"\"\"\n\n _collated_neuron_activity: Float[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Collated neuron activity, over the current data collection window.\"\"\"\n\n _threshold_is_dead_portion_fires: float\n \"\"\"Threshold for determining if a neuron has fired (or is dead).\"\"\"\n\n _max_n_resamples: int\n \"\"\"Maximum number of times that resampling should be performed.\"\"\"\n\n _n_activations_collated_since_last_resample: int = 0\n \"\"\"Number of activations collated since we last resampled.\n\n Number of vectors used to collate neuron activity, over the current collation window.\n \"\"\"\n\n _n_components: int\n \"\"\"Number of components.\"\"\"\n\n _n_times_resampled: int = 0\n \"\"\"Number of times that resampling has been performed.\"\"\"\n\n neuron_activity_window_end: int\n \"\"\"End of the window for collecting neuron activity.\"\"\"\n\n neuron_activity_window_start: int\n \"\"\"Start of the window for collecting neuron activity.\"\"\"\n\n @validate_call\n def __init__(\n self,\n n_learned_features: PositiveInt,\n n_components: NonNegativeInt = 1,\n resample_interval: PositiveInt = 200_000_000,\n max_n_resamples: NonNegativeInt = 4,\n n_activations_activity_collate: PositiveInt = 100_000_000,\n resample_dataset_size: PositiveInt = 819_200,\n threshold_is_dead_portion_fires: Annotated[float, Field(strict=True, ge=0, le=1)] = 0.0,\n ) -> None:\n r\"\"\"Initialize the activation 
resampler.\n\n Defaults to values used in the Anthropic Towards Monosemanticity paper.\n\n Args:\n n_learned_features: Number of learned features\n n_components: Number of components that the SAE is being trained on.\n resample_interval: Interval in number of autoencoder input activation vectors trained\n on, before resampling.\n max_n_resamples: Maximum number of resamples to perform throughout the entire pipeline.\n Set to inf if you want to have no limit.\n n_activations_activity_collate: Number of autoencoder learned activation vectors to\n collate before resampling (the activation resampler will start collecting on vector\n $\\text{resample_interval} - \\text{n_steps_collate}$).\n resample_dataset_size: Number of autoencoder input activations to use for calculating\n the loss, as part of the resampling process to create the reset neuron weights.\n threshold_is_dead_portion_fires: Threshold for determining if a neuron is dead (has\n \"fired\" in less than this portion of the collated sample).\n\n Raises:\n ValueError: If any of the arguments are invalid (e.g. negative integers).\n \"\"\"\n if n_activations_activity_collate > resample_interval:\n error_message = (\n \"Number of steps to collate must be less than or equal to the resample interval.\"\n )\n raise ValueError(error_message)\n\n super().__init__()\n self.neuron_activity_window_end = resample_interval\n self.neuron_activity_window_start = resample_interval - n_activations_activity_collate\n self._max_n_resamples = max_n_resamples\n self._collated_neuron_activity = torch.zeros(\n (n_components, n_learned_features), dtype=torch.int64\n )\n self._resample_dataset_size = resample_dataset_size\n self._threshold_is_dead_portion_fires = threshold_is_dead_portion_fires\n self._n_components = n_components\n\n def _get_dead_neuron_indices(\n self,\n ) -> list[Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]]:\n \"\"\"Identify the indices of neurons that are dead.\n\n Identifies any neurons that have fired less than the threshold portion of the collated\n sample size.\n\n Example:\n >>> resampler = ActivationResampler(n_learned_features=6, n_components=2)\n >>> resampler._collated_neuron_activity = torch.tensor(\n ... [[1, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 0]]\n ... 
)\n >>> resampler._get_dead_neuron_indices()\n [tensor([2, 3]), tensor([5])]\n\n Returns:\n List of dead neuron indices for each component.\n\n Raises:\n ValueError: If no neuron activity has been collated yet.\n \"\"\"\n # Check we have already collated some neuron activity\n if torch.all(self._collated_neuron_activity == 0):\n error_message = \"Cannot get dead neuron indices without neuron activity.\"\n raise ValueError(error_message)\n\n # Find any neurons that fire less than the threshold portion of times\n threshold_is_dead_n_fires: int = int(\n self._n_activations_collated_since_last_resample * self._threshold_is_dead_portion_fires\n )\n\n return [\n torch.where(self._collated_neuron_activity[component_idx] <= threshold_is_dead_n_fires)[\n 0\n ].to(dtype=torch.int64)\n for component_idx in range(self._n_components)\n ]\n\n def compute_loss_and_get_activations(\n self,\n store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> LossInputActivationsTuple:\n \"\"\"Compute the loss on a random subset of inputs.\n\n Motivation:\n Helps find input vectors that have high SAE loss, so that we can resample dead neurons\n in a way that improves performance on these specific input vectors.\n\n Args:\n store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n A tuple of loss per item, and all input activations.\n\n Raises:\n ValueError: If the number of items in the store is less than the number of inputs\n \"\"\"\n with torch.no_grad():\n loss_batches: list[Float[Tensor, Axis.BATCH]] = []\n input_activations_batches: list[\n Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n dataloader = DataLoader(store, batch_size=train_batch_size)\n n_inputs = self._resample_dataset_size\n n_batches_required: int = n_inputs // train_batch_size\n model_device: torch.device = get_model_device(autoencoder)\n\n for batch_idx, batch in enumerate(iter(dataloader)):\n input_activations_batches.append(batch)\n source_activations = batch.to(model_device)\n learned_activations, reconstructed_activations = autoencoder(source_activations)\n loss_batches.append(\n loss_fn.forward(\n source_activations, learned_activations, reconstructed_activations\n )\n )\n if batch_idx >= n_batches_required:\n break\n\n loss_per_item = torch.cat(loss_batches).to(model_device)\n input_activations = torch.cat(input_activations_batches).to(model_device)\n\n # Check we generated enough data\n if len(loss_per_item) < n_inputs:\n error_message = (\n f\"Cannot get {n_inputs} items from the store, \"\n f\"as only {len(loss_per_item)} were available.\"\n )\n raise ValueError(error_message)\n\n return LossInputActivationsTuple(loss_per_item, input_activations)\n\n @staticmethod\n def assign_sampling_probabilities(\n loss: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Assign the sampling probabilities for each input activations vector.\n\n Assign each input vector a probability of being picked that is proportional to the square of\n the autoencoder's loss on that input.\n\n Examples:\n >>> loss = torch.tensor([1.0, 2.0, 3.0])\n >>> ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([0.0700, 0.2900, 0.6400])\n\n >>> loss = torch.tensor([[1.0, 2], [2, 4], [3, 6]])\n >>> 
ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([[0.0700, 0.0700],\n [0.2900, 0.2900],\n [0.6400, 0.6400]])\n\n Args:\n loss: Loss per item.\n\n Returns:\n A tensor of probabilities for each item.\n \"\"\"\n square_loss = loss.pow(2)\n return square_loss / square_loss.sum(0)\n\n @staticmethod\n def sample_input(\n probabilities: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n n_samples: list[int],\n ) -> list[Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]]:\n \"\"\"Sample an input vector based on the provided probabilities.\n\n Example:\n >>> probabilities = torch.tensor([[0.1], [0.2], [0.7]])\n >>> input_activations = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]])\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = ActivationResampler.sample_input(\n ... probabilities, input_activations, [2]\n ... )\n >>> sampled_input[0].tolist()\n [[5.0, 6.0], [3.0, 4.0]]\n\n Args:\n probabilities: Probabilities for each input.\n input_activations: Input activation vectors.\n n_samples: Number of samples to take (number of dead neurons).\n\n Returns:\n Sampled input activation vector.\n\n Raises:\n ValueError: If the number of samples is greater than the number of input activations.\n \"\"\"\n sampled_inputs: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n\n for component_idx, component_n_samples in enumerate(n_samples):\n component_probabilities: Float[Tensor, Axis.BATCH] = get_component_slice_tensor(\n input_tensor=probabilities,\n n_dim_with_component=2,\n component_dim=1,\n component_idx=component_idx,\n )\n\n component_input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(\n input_tensor=input_activations,\n n_dim_with_component=3,\n component_dim=1,\n component_idx=component_idx,\n )\n\n if component_n_samples > len(component_input_activations):\n exception_message = (\n f\"Cannot sample {component_n_samples} inputs from \"\n f\"{len(component_input_activations)} input activations.\"\n )\n raise ValueError(exception_message)\n\n # Handle the 0 dead neurons case\n if component_n_samples == 0:\n sampled_inputs.append(\n torch.empty(\n (0, component_input_activations.shape[-1]),\n dtype=component_input_activations.dtype,\n device=component_input_activations.device,\n )\n )\n continue\n\n # Handle the 1+ dead neuron case\n component_sample_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX] = torch.multinomial(\n component_probabilities, num_samples=component_n_samples\n )\n sampled_inputs.append(component_input_activations[component_sample_indices, :])\n\n return sampled_inputs\n\n @staticmethod\n def renormalize_and_scale(\n sampled_input: Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n neuron_activity: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE)],\n encoder_weight: Float[Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n ) -> Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Renormalize and scale the resampled dictionary vectors.\n\n Renormalize the input vector to equal the average norm of the encoder weights for alive\n neurons times 0.2.\n\n Example:\n >>> from torch.nn import Parameter\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = 
torch.tensor([[3.0, 4.0]])\n >>> neuron_activity = torch.tensor([3, 0, 5, 0, 1, 3])\n >>> encoder_weight = Parameter(torch.ones((6, 2)))\n >>> rescaled_input = ActivationResampler.renormalize_and_scale(\n ... sampled_input,\n ... neuron_activity,\n ... encoder_weight\n ... )\n >>> rescaled_input.round(decimals=1)\n tensor([[0.2000, 0.2000]])\n\n Args:\n sampled_input: Tensor of the sampled input activation.\n neuron_activity: Tensor representing the number of times each neuron fired.\n encoder_weight: Tensor of encoder weights.\n\n Returns:\n Rescaled sampled input.\n\n Raises:\n ValueError: If there are no alive neurons.\n \"\"\"\n alive_neuron_mask: Bool[Tensor, \" learned_features\"] = neuron_activity > 0\n\n # Check there is at least one alive neuron\n if not torch.any(alive_neuron_mask):\n error_message = \"No alive neurons found.\"\n raise ValueError(error_message)\n\n # Handle no dead neurons\n n_dead_neurons = len(sampled_input)\n if n_dead_neurons == 0:\n return torch.empty(\n (0, sampled_input.shape[-1]), dtype=sampled_input.dtype, device=sampled_input.device\n )\n\n # Calculate the average norm of the encoder weights for alive neurons.\n detached_encoder_weight = encoder_weight.detach() # Don't track gradients\n alive_encoder_weights: Float[\n Tensor, Axis.names(Axis.ALIVE_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = detached_encoder_weight[alive_neuron_mask, :]\n average_alive_norm: Float[Tensor, Axis.SINGLE_ITEM] = alive_encoder_weights.norm(\n dim=-1\n ).mean()\n\n # Renormalize the input vector to equal the average norm of the encoder weights for alive\n # neurons times 0.2.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input, dim=-1)\n return renormalized_input * (average_alive_norm * 0.2)\n\n def resample_dead_neurons(\n self,\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults]:\n \"\"\"Resample dead neurons.\n\n Args:\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n For each component that the SAE is being trained on, the indices of dead neurons and the\n updates for the encoder and decoder weights and biases.\n \"\"\"\n parameter_update_results: list[ParameterUpdateResults] = []\n\n with torch.no_grad():\n dead_neuron_indices: list[\n Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]\n ] = self._get_dead_neuron_indices()\n\n # Compute the loss for the current model on a random subset of inputs and get the\n # activations.\n loss_per_item, input_activations = self.compute_loss_and_get_activations(\n store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Assign each input vector a probability of being picked that is proportional to the\n # square of the autoencoder's loss on that input.\n sample_probabilities: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = self.assign_sampling_probabilities(loss_per_item)\n\n # For each dead neuron sample an input according to these probabilities.\n sampled_input: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = self.sample_input(\n sample_probabilities, input_activations, [len(dead) for dead in dead_neuron_indices]\n )\n\n for component_idx 
in range(self._n_components):\n # Renormalize each input vector to have unit L2 norm and set this to be the\n # dictionary vector for the dead autoencoder neuron.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input[component_idx], dim=-1)\n\n dead_decoder_weight_updates = rearrange(\n renormalized_input, \"dead_neuron input_feature -> input_feature dead_neuron\"\n )\n\n # For the corresponding encoder vector, renormalize the input vector to equal the\n # average norm of the encoder weights for alive neurons times 0.2. Set the\n # corresponding encoder bias element to zero.\n encoder_weight: Float[\n Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(autoencoder.encoder.weight, 3, 0, component_idx)\n\n rescaled_sampled_input = self.renormalize_and_scale(\n sampled_input=sampled_input[component_idx],\n neuron_activity=self._collated_neuron_activity[component_idx],\n encoder_weight=encoder_weight,\n )\n\n dead_encoder_bias_updates = torch.zeros_like(\n dead_neuron_indices[component_idx],\n dtype=dead_decoder_weight_updates.dtype,\n device=dead_decoder_weight_updates.device,\n )\n\n parameter_update_results.append(\n ParameterUpdateResults(\n dead_neuron_indices=dead_neuron_indices[component_idx],\n dead_encoder_weight_updates=rescaled_sampled_input,\n dead_encoder_bias_updates=dead_encoder_bias_updates,\n dead_decoder_weight_updates=dead_decoder_weight_updates,\n )\n )\n\n return parameter_update_results\n\n def step_resampler(\n self,\n batch_neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)],\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults] | None:\n \"\"\"Step the resampler, collating neuron activity and resampling if necessary.\n\n Args:\n batch_neuron_activity: Number of times each neuron fired in the current batch.\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n Parameter update results (for each component that the SAE is being trained on) if\n resampling is due. Otherwise None.\n \"\"\"\n # Update the counter\n self._activations_seen_since_last_resample += len(activation_store)\n\n if self._n_times_resampled < self._max_n_resamples:\n # Collate neuron activity, if in the data collection window. 
For example in the\n # Anthropic Towards Monosemanticity paper, the window started collecting at 100m\n # activations and stopped at 200m (and then repeated this again a few times until the\n # max times to resample was hit).\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_start:\n detached_neuron_activity = batch_neuron_activity.detach().cpu()\n self._collated_neuron_activity.add_(detached_neuron_activity)\n self._n_activations_collated_since_last_resample += train_batch_size\n\n # Check if we should resample.\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_end:\n # Get resampled dictionary vectors\n resample_res = self.resample_dead_neurons(\n activation_store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Update counters\n self._activations_seen_since_last_resample = 0\n self._n_activations_collated_since_last_resample = 0\n self._n_times_resampled += 1\n\n # Reset the collated neuron activity\n self._collated_neuron_activity.zero_()\n\n return resample_res\n\n return None\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the activation resampler.\"\"\"\n return (\n f\"ActivationResampler(\"\n f\"n_components={self._n_components}, \"\n f\"neuron_activity_window_start={self.neuron_activity_window_end}, \"\n f\"neuron_activity_window_end={self.neuron_activity_window_end}, \"\n f\"max_resamples={self._max_n_resamples}, \"\n f\"resample_dataset_size={self._resample_dataset_size}, \"\n f\"dead_neuron_threshold={self._threshold_is_dead_portion_fires})\"\n )" }, { "identifier": "ActivationStore", "path": "sparse_autoencoder/activation_store/base_store.py", "snippet": "class ActivationStore(\n Dataset[Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]], ABC\n):\n \"\"\"Activation Store Abstract Class.\n\n Extends the `torch.utils.data.Dataset` class to provide an activation store, with additional\n :meth:`append` and :meth:`extend` methods (the latter of which should typically be\n non-blocking). The resulting activation store can be used with a `torch.utils.data.DataLoader`\n to iterate over the dataset.\n\n Extend this class if you want to create a new activation store (noting you also need to create\n `__getitem__` and `__len__` methods from the underlying `torch.utils.data.Dataset` class).\n\n Example:\n >>> import torch\n >>> class MyActivationStore(ActivationStore):\n ...\n ... @property\n ... def current_activations_stored_per_component(self):\n ... raise NotImplementedError\n ...\n ... @property\n ... def n_components(self):\n ... raise NotImplementedError\n ...\n ... def __init__(self):\n ... super().__init__()\n ... self._data = [] # In this example, we just store in a list\n ...\n ... def append(self, item) -> None:\n ... self._data.append(item)\n ...\n ... def extend(self, batch):\n ... self._data.extend(batch)\n ...\n ... def empty(self):\n ... self._data = []\n ...\n ... def __getitem__(self, index: int):\n ... return self._data[index]\n ...\n ... def __len__(self) -> int:\n ... 
return len(self._data)\n ...\n >>> store = MyActivationStore()\n >>> store.append(torch.randn(100))\n >>> print(len(store))\n 1\n \"\"\"\n\n @abstractmethod\n def append(\n self,\n item: Float[Tensor, Axis.names(Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> Future | None:\n \"\"\"Add a Single Item to the Store.\"\"\"\n\n @abstractmethod\n def extend(\n self,\n batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> Future | None:\n \"\"\"Add a Batch to the Store.\"\"\"\n\n @abstractmethod\n def empty(self) -> None:\n \"\"\"Empty the Store.\"\"\"\n\n @property\n @abstractmethod\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n\n @property\n @abstractmethod\n def current_activations_stored_per_component(self) -> list[int]:\n \"\"\"Current activations stored per component.\"\"\"\n\n @abstractmethod\n def __len__(self) -> int:\n \"\"\"Get the Length of the Store.\"\"\"\n\n @abstractmethod\n def __getitem__(\n self, index: tuple[int, ...] | slice | int\n ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Get an Item from the Store.\"\"\"\n\n def shuffle(self) -> None:\n \"\"\"Optional shuffle method.\"\"\"\n\n @final\n @validate_call\n def fill_with_test_data(\n self,\n n_batches: PositiveInt = 1,\n batch_size: PositiveInt = 16,\n n_components: PositiveInt = 1,\n input_features: PositiveInt = 256,\n ) -> None:\n \"\"\"Fill the store with test data.\n\n For use when testing your code, to ensure it works with a real activation store.\n\n Warning:\n You may want to use `torch.seed(0)` to make the random data deterministic, if your test\n requires inspecting the data itself.\n\n Example:\n >>> from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore\n >>> store = TensorActivationStore(max_items=100, n_neurons=256, n_components=1)\n >>> store.fill_with_test_data(batch_size=100)\n >>> len(store)\n 100\n\n Args:\n n_batches: Number of batches to fill the store with.\n batch_size: Number of items per batch.\n n_components: Number of source model components the SAE is trained on.\n input_features: Number of input features per item.\n \"\"\"\n for _ in range(n_batches):\n for component_idx in range(n_components):\n sample = torch.rand(batch_size, input_features)\n self.extend(sample, component_idx)" }, { "identifier": "TensorActivationStore", "path": "sparse_autoencoder/activation_store/tensor_store.py", "snippet": "class TensorActivationStore(ActivationStore):\n \"\"\"Tensor Activation Store.\n\n Stores tensors in a (large) tensor of shape (item, neuron). Requires the number of activation\n vectors to be stored to be known in advance. 
Multiprocess safe.\n\n Extends the `torch.utils.data.Dataset` class to provide a list-based activation store, with\n additional :meth:`append` and :meth:`extend` methods (the latter of which is non-blocking).\n\n Examples:\n Create an empty activation dataset:\n\n >>> import torch\n >>> store = TensorActivationStore(max_items=1000, n_neurons=100, n_components=2)\n\n Add a single activation vector to the dataset (for a component):\n\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=1)\n >>> len(store)\n 1\n\n Add a [batch, neurons] activation tensor to the dataset:\n\n >>> store.empty()\n >>> batch = torch.randn(10, 100)\n >>> store.extend(batch, component_idx=0)\n >>> store.extend(batch, component_idx=1)\n >>> len(store)\n 10\n\n Shuffle the dataset **before passing it to the DataLoader**:\n\n >>> store.shuffle() # Faster than using the DataLoader shuffle argument\n\n Use the dataloader to iterate over the dataset:\n\n >>> loader = torch.utils.data.DataLoader(store, shuffle=False, batch_size=2)\n >>> next_item = next(iter(loader))\n >>> next_item.shape\n torch.Size([2, 2, 100])\n \"\"\"\n\n _data: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)]\n \"\"\"Underlying Tensor Data Store.\"\"\"\n\n _items_stored: list[int]\n \"\"\"Number of items stored.\"\"\"\n\n max_items: int\n \"\"\"Maximum Number of Items to Store.\"\"\"\n\n _n_components: int\n \"\"\"Number of components\"\"\"\n\n @property\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n return self._n_components\n\n @property\n def current_activations_stored_per_component(self) -> list[int]:\n \"\"\"Number of activations stored per component.\"\"\"\n return self._items_stored\n\n @validate_call(config={\"arbitrary_types_allowed\": True})\n def __init__(\n self,\n max_items: PositiveInt,\n n_neurons: PositiveInt,\n n_components: PositiveInt,\n device: torch.device | None = None,\n ) -> None:\n \"\"\"Initialise the Tensor Activation Store.\n\n Args:\n max_items: Maximum number of items to store per component (individual activation\n vectors).\n n_neurons: Number of neurons in each activation vector.\n n_components: Number of components to store (i.e. number of source models).\n device: Device to store the activation vectors on.\n \"\"\"\n self._n_components = n_components\n self._items_stored = [0] * n_components\n self._max_items = max_items\n self._data = torch.empty((max_items, n_components, n_neurons), device=device)\n\n def __len__(self) -> int:\n \"\"\"Length Dunder Method.\n\n Returns the number of activation vectors per component in the dataset.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10_000_000, n_neurons=100, n_components=1)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> len(store)\n 2\n\n Returns:\n The number of activation vectors in the dataset.\n \"\"\"\n # Min as this is the amount of activations that can be fetched by get_item\n return min(self.current_activations_stored_per_component)\n\n def __sizeof__(self) -> int:\n \"\"\"Sizeof Dunder Method.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=100, n_components=1)\n >>> store.__sizeof__() # Pre-allocated tensor of 2x100\n 800\n\n Returns:\n The size of the underlying tensor in bytes.\n \"\"\"\n return self._data.element_size() * self._data.nelement()\n\n def __getitem__(\n self, index: tuple[int, ...] 
| slice | int\n ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Get Item Dunder Method.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n index: The index of the tensor to fetch.\n\n Returns:\n The activation store item at the given index.\n \"\"\"\n return self._data[index]\n\n def shuffle(self) -> None:\n \"\"\"Shuffle the Data In-Place.\n\n This is much faster than using the shuffle argument on `torch.utils.data.DataLoader`.\n\n Example:\n >>> import torch\n >>> _seed = torch.manual_seed(42)\n >>> store = TensorActivationStore(max_items=10, n_neurons=1, n_components=1)\n >>> store.append(torch.tensor([0.]), component_idx=0)\n >>> store.append(torch.tensor([1.]), component_idx=0)\n >>> store.append(torch.tensor([2.]), component_idx=0)\n >>> store.shuffle()\n >>> [store[i, 0].item() for i in range(3)]\n [0.0, 2.0, 1.0]\n \"\"\"\n # Generate a permutation of the indices for the active data\n perm = torch.randperm(len(self))\n\n # Use this permutation to shuffle the active data in-place\n self._data[: len(self)] = self._data[perm]\n\n def append(self, item: Float[Tensor, Axis.INPUT_OUTPUT_FEATURE], component_idx: int) -> None:\n \"\"\"Add a single item to the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n item: The item to append to the dataset.\n component_idx: The component index to append the item to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n if self._items_stored[component_idx] + 1 > self._max_items:\n raise StoreFullError\n\n self._data[self._items_stored[component_idx], component_idx] = item.to(\n self._data.device,\n )\n self._items_stored[component_idx] += 1\n\n def extend(\n self,\n batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> None:\n \"\"\"Add a batch to the store.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n\n Args:\n batch: The batch to append to the dataset.\n component_idx: The component index to append the batch to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n n_activation_tensors: int = batch.shape[0]\n if self._items_stored[component_idx] + n_activation_tensors > self._max_items:\n raise StoreFullError\n\n self._data[\n self._items_stored[component_idx] : self._items_stored[component_idx]\n + n_activation_tensors,\n component_idx,\n ] = batch.to(self._data.device)\n self._items_stored[component_idx] += n_activation_tensors\n\n def empty(self) -> None:\n \"\"\"Empty the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n >>> store.empty()\n >>> len(store)\n 0\n \"\"\"\n # We don't need to zero the data, just reset the number of items stored\n self._items_stored = [0 for _ in self._items_stored]" }, { "identifier": "SparseAutoencoder", "path": "sparse_autoencoder/autoencoder/model.py", 
"snippet": "class SparseAutoencoder(Module):\n \"\"\"Sparse Autoencoder Model.\"\"\"\n\n config: SparseAutoencoderConfig\n \"\"\"Model config.\"\"\"\n\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Estimated Geometric Median of the Dataset.\n\n Used for initialising :attr:`tied_bias`.\n \"\"\"\n\n tied_bias: Float[\n Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Tied Bias Parameter.\n\n The same bias is used pre-encoder and post-decoder.\n \"\"\"\n\n pre_encoder_bias: TiedBias\n \"\"\"Pre-Encoder Bias.\"\"\"\n\n encoder: LinearEncoder\n \"\"\"Encoder.\"\"\"\n\n decoder: UnitNormDecoder\n \"\"\"Decoder.\"\"\"\n\n post_decoder_bias: TiedBias\n \"\"\"Post-Decoder Bias.\"\"\"\n\n def __init__(\n self,\n config: SparseAutoencoderConfig,\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n | None = None,\n ) -> None:\n \"\"\"Initialize the Sparse Autoencoder Model.\n\n Args:\n config: Model config.\n geometric_median_dataset: Estimated geometric median of the dataset.\n \"\"\"\n super().__init__()\n\n self.config = config\n\n # Store the geometric median of the dataset (so that we can reset parameters). This is not a\n # parameter itself (the tied bias parameter is used for that), so gradients are disabled.\n tied_bias_shape = shape_with_optional_dimensions(\n config.n_components, config.n_input_features\n )\n if geometric_median_dataset is not None:\n self.geometric_median_dataset = geometric_median_dataset.clone()\n self.geometric_median_dataset.requires_grad = False\n else:\n self.geometric_median_dataset = torch.zeros(tied_bias_shape)\n self.geometric_median_dataset.requires_grad = False\n\n # Initialize the tied bias\n self.tied_bias = Parameter(torch.empty(tied_bias_shape))\n self.initialize_tied_parameters()\n\n # Initialize the components\n self.pre_encoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.PRE_ENCODER)\n\n self.encoder = LinearEncoder(\n input_features=config.n_input_features,\n learnt_features=config.n_learned_features,\n n_components=config.n_components,\n )\n\n self.decoder = UnitNormDecoder(\n learnt_features=config.n_learned_features,\n decoded_features=config.n_input_features,\n n_components=config.n_components,\n )\n\n self.post_decoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.POST_DECODER)\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> ForwardPassResult:\n \"\"\"Forward Pass.\n\n Args:\n x: Input activations (e.g. 
activations from an MLP layer in a transformer model).\n\n Returns:\n Tuple of learned activations and decoded activations.\n \"\"\"\n x = self.pre_encoder_bias(x)\n learned_activations = self.encoder(x)\n x = self.decoder(learned_activations)\n decoded_activations = self.post_decoder_bias(x)\n\n return ForwardPassResult(learned_activations, decoded_activations)\n\n def initialize_tied_parameters(self) -> None:\n \"\"\"Initialize the tied parameters.\"\"\"\n # The tied bias is initialised as the geometric median of the dataset\n self.tied_bias.data = self.geometric_median_dataset\n\n def reset_parameters(self) -> None:\n \"\"\"Reset the parameters.\"\"\"\n self.initialize_tied_parameters()\n for module in self.network:\n if \"reset_parameters\" in dir(module):\n module.reset_parameters()\n\n @property\n def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n \"\"\"Reset optimizer parameter details.\n\n Details of the parameters that should be reset in the optimizer, when resetting\n dictionary vectors.\n\n Returns:\n List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.\n \"\"\"\n return (\n self.encoder.reset_optimizer_parameter_details\n + self.decoder.reset_optimizer_parameter_details\n )\n\n def post_backwards_hook(self) -> None:\n \"\"\"Hook to be called after each learning step.\n\n This can be used to e.g. constrain weights to unit norm.\n \"\"\"\n self.decoder.constrain_weights_unit_norm()\n\n @staticmethod\n @validate_call\n def get_single_component_state_dict(\n state: SparseAutoencoderState, component_idx: NonNegativeInt\n ) -> dict[str, Tensor]:\n \"\"\"Get the state dict for a single component.\n\n Args:\n state: Sparse Autoencoder state.\n component_idx: Index of the component to get the state dict for.\n\n Returns:\n State dict for the component.\n\n Raises:\n ValueError: If the state dict doesn't contain a components dimension.\n \"\"\"\n # Check the state has a components dimension\n if state.config.n_components is None:\n error_message = (\n \"Trying to load a single component from the state dict, but the state dict \"\n \"doesn't contain a components dimension.\"\n )\n raise ValueError(error_message)\n\n # Return the state dict for the component\n return {key: value[component_idx] for key, value in state.state_dict.items()}\n\n def save(self, file_path: Path) -> None:\n \"\"\"Save the model config and state dict to a file.\n\n Args:\n file_path: Path to save the model to.\n \"\"\"\n file_path.parent.mkdir(parents=True, exist_ok=True)\n state = SparseAutoencoderState(config=self.config, state_dict=self.state_dict())\n torch.save(state, file_path)\n\n @staticmethod\n def load(\n file_path: FILE_LIKE,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from a file.\n\n Args:\n file_path: Path to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. 
Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n # Load the file\n serialized_state = torch.load(file_path, map_location=torch.device(\"cpu\"))\n state = SparseAutoencoderState.model_validate(serialized_state)\n\n # Initialise the model\n config = SparseAutoencoderConfig(\n n_input_features=state.config.n_input_features,\n n_learned_features=state.config.n_learned_features,\n n_components=state.config.n_components if component_idx is None else None,\n )\n state_dict = (\n SparseAutoencoder.get_single_component_state_dict(state, component_idx)\n if component_idx is not None\n else state.state_dict\n )\n model = SparseAutoencoder(config)\n model.load_state_dict(state_dict)\n\n return model\n\n def save_to_wandb(\n self,\n artifact_name: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n ) -> str:\n \"\"\"Save the model to wandb.\n\n Args:\n artifact_name: A human-readable name for this artifact, which is how you can identify\n this artifact in the UI or reference it in use_artifact calls. Names can contain\n letters, numbers, underscores, hyphens, and dots. The name must be unique across a\n project. Example: \"sweep_name 1e9 activations\".\n directory: Directory to save the model to.\n\n Returns:\n Name of the wandb artifact.\n\n Raises:\n ValueError: If wandb is not initialised.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_name = artifact_name + \".pt\"\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to wandb\n if wandb.run is None:\n error_message = \"Trying to save the model to wandb, but wandb is not initialised.\"\n raise ValueError(error_message)\n artifact = wandb.Artifact(\n artifact_name,\n type=\"model\",\n description=\"Sparse Autoencoder model state, created with `sparse_autoencoder`.\",\n )\n artifact.add_file(str(file_path), name=\"sae-model-state.pt\")\n artifact.save()\n wandb.log_artifact(artifact)\n artifact.wait()\n\n return artifact.source_qualified_name\n\n @staticmethod\n def load_from_wandb(\n wandb_artifact_name: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from wandb.\n\n Args:\n wandb_artifact_name: Name of the wandb artifact to load the model from (e.g.\n \"username/project/artifact_name:version\").\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n api = wandb.Api()\n artifact = api.artifact(wandb_artifact_name, type=\"model\")\n download_path = artifact.download()\n return SparseAutoencoder.load(Path(download_path) / \"sae-model-state.pt\", component_idx)\n\n def save_to_hugging_face(\n self,\n file_name: str,\n repo_id: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n hf_access_token: str | None = None,\n ) -> None:\n \"\"\"Save the model to Hugging Face.\n\n Args:\n file_name: Name of the file (e.g. 
\"model-something.pt\").\n repo_id: ID of the repo to save the model to.\n directory: Directory to save the model to.\n hf_access_token: Hugging Face access token.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to Hugging Face\n api = HfApi(token=hf_access_token)\n api.upload_file(\n path_or_fileobj=file_path,\n path_in_repo=file_name,\n repo_id=repo_id,\n repo_type=\"model\",\n )\n\n @staticmethod\n def load_from_hugging_face(\n file_name: str,\n repo_id: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from Hugging Face.\n\n Args:\n file_name: File name of the .pt state file.\n repo_id: ID of the repo to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n local_file = hf_hub_download(\n repo_id=repo_id,\n repo_type=\"model\",\n filename=file_name,\n revision=\"main\",\n )\n\n return SparseAutoencoder.load(Path(local_file), component_idx)" }, { "identifier": "SparseAutoencoderConfig", "path": "sparse_autoencoder/autoencoder/model.py", "snippet": "class SparseAutoencoderConfig(BaseModel, frozen=True):\n \"\"\"SAE model config.\"\"\"\n\n n_input_features: PositiveInt\n \"\"\"Number of input features.\n\n E.g. `d_mlp` if training on MLP activations from TransformerLens).\n \"\"\"\n\n n_learned_features: PositiveInt\n \"\"\"Number of learned features.\n\n The initial paper experimented with 1 to 256 times the number of input features, and primarily\n used a multiple of 8.\"\"\"\n\n n_components: PositiveInt | None = None\n \"\"\"Number of source model components the SAE is trained on.\"\"\n\n This is useful if you want to train the SAE on several components of the source model at once.\n If `None`, the SAE is assumed to be trained on just one component (in this case the model won't\n contain a component axis in any of the parameters).\n \"\"\"" }, { "identifier": "L2ReconstructionLoss", "path": "sparse_autoencoder/loss/decoded_activations_l2.py", "snippet": "class L2ReconstructionLoss(AbstractLoss):\n \"\"\"L2 Reconstruction loss.\n\n L2 reconstruction loss is calculated as the sum squared error between each each input vector\n and it's corresponding decoded vector. 
The original paper found that models trained with some\n loss functions such as cross-entropy loss generally prefer to represent features\n polysemantically, whereas models trained with L2 may achieve the same loss for both\n polysemantic and monosemantic representations of true features.\n\n Example:\n >>> import torch\n >>> loss = L2ReconstructionLoss()\n >>> input_activations = torch.tensor([[5.0, 4], [3.0, 4]])\n >>> output_activations = torch.tensor([[1.0, 5], [1.0, 5]])\n >>> unused_activations = torch.zeros_like(input_activations)\n >>> # Outputs both loss and metrics to log\n >>> loss.forward(input_activations, unused_activations, output_activations)\n tensor([8.5000, 2.5000])\n \"\"\"\n\n _reduction: LossReductionType\n \"\"\"MSE reduction type.\"\"\"\n\n def __init__(self, reduction: LossReductionType = LossReductionType.MEAN) -> None:\n \"\"\"Initialise the L2 reconstruction loss.\n\n Args:\n reduction: MSE reduction type.\n \"\"\"\n super().__init__()\n self._reduction = reduction\n\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n return \"l2_reconstruction_loss\"\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[ # noqa: ARG002\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> (\n Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]\n | Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]\n ):\n \"\"\"Calculate the L2 reconstruction loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n square_error_loss = mse_loss(source_activations, decoded_activations, reduction=\"none\")\n\n match self._reduction:\n case LossReductionType.MEAN:\n return square_error_loss.mean(dim=-1)\n case LossReductionType.SUM:\n return square_error_loss.sum(dim=-1)\n case LossReductionType.NONE:\n return square_error_loss" }, { "identifier": "LearnedActivationsL1Loss", "path": "sparse_autoencoder/loss/learned_activations_l1.py", "snippet": "class LearnedActivationsL1Loss(AbstractLoss):\n \"\"\"Learned activations L1 (absolute error) loss.\n\n L1 loss penalty is the absolute sum of the learned activations. 
The L1 penalty is this\n multiplied by the l1_coefficient (designed to encourage sparsity).\n\n Example:\n >>> l1_loss = LearnedActivationsL1Loss(0.1)\n >>> learned_activations = torch.tensor([[2.0, -3], [2.0, -3]])\n >>> unused_activations = torch.zeros_like(learned_activations)\n >>> # Returns loss and metrics to log\n >>> l1_loss.forward(unused_activations, learned_activations, unused_activations)[0]\n tensor(0.5000)\n \"\"\"\n\n l1_coefficient: float | Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL)]\n \"\"\"L1 coefficient.\"\"\"\n\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n return \"learned_activations_l1_loss_penalty\"\n\n @validate_call(config={\"arbitrary_types_allowed\": True})\n def __init__(\n self, l1_coefficient: PositiveFloat | Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL)]\n ) -> None:\n \"\"\"Initialize the absolute error loss.\n\n Args:\n l1_coefficient: L1 coefficient. The original paper experimented with L1 coefficients of\n [0.01, 0.008, 0.006, 0.004, 0.001]. They used 250 tokens per prompt, so as an\n approximate guide if you use e.g. 2x this number of tokens you might consider using\n 0.5x the l1 coefficient.\n \"\"\"\n self.l1_coefficient = l1_coefficient\n super().__init__()\n\n def _l1_loss(\n self,\n source_activations: Float[ # noqa: ARG002\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[ # noqa: ARG002s\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> _L1LossAndPenalty:\n \"\"\"Learned activations L1 (absolute error) loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Tuple of itemwise absolute loss, and itemwise absolute loss multiplied by the l1\n coefficient.\n \"\"\"\n # Absolute loss is the summed absolute value of the learned activations (i.e. 
over the\n # learned feature axis).\n itemwise_absolute_loss: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = torch.abs(learned_activations).sum(dim=-1)\n\n itemwise_absolute_loss_penalty: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = itemwise_absolute_loss * self.l1_coefficient\n\n return _L1LossAndPenalty(\n itemwise_absolute_loss=itemwise_absolute_loss,\n itemwise_absolute_loss_penalty=itemwise_absolute_loss_penalty,\n )\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Learned activations L1 (absolute error) loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n return self._l1_loss(\n source_activations, learned_activations, decoded_activations\n ).itemwise_absolute_loss_penalty\n\n # Override to add both the loss and the penalty to the log\n def scalar_loss_with_log(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n component_reduction: LossReductionType = LossReductionType.NONE,\n ) -> LossResultWithMetrics:\n \"\"\"Scalar L1 loss (reduced across the batch and component axis) with logging.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Batch reduction type. 
Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n component_reduction: Component reduction type.\n\n Returns:\n Tuple of the L1 absolute error batch scalar loss and a dict of the properties to log\n (loss before and after the l1 coefficient).\n\n Raises:\n ValueError: If batch_reduction is LossReductionType.NONE.\n \"\"\"\n itemwise_absolute_loss, itemwise_absolute_loss_penalty = self._l1_loss(\n source_activations, learned_activations, decoded_activations\n )\n\n match batch_reduction:\n case LossReductionType.MEAN:\n batch_scalar_loss = itemwise_absolute_loss.mean(0)\n batch_scalar_loss_penalty = itemwise_absolute_loss_penalty.mean(0)\n case LossReductionType.SUM:\n batch_scalar_loss = itemwise_absolute_loss.sum(0)\n batch_scalar_loss_penalty = itemwise_absolute_loss_penalty.sum(0)\n case LossReductionType.NONE:\n error_message = \"Batch reduction type NONE not supported.\"\n raise ValueError(error_message)\n\n # Create the log\n metrics: list[MetricResult] = [\n MetricResult(\n name=\"loss\",\n postfix=\"learned_activations_l1\",\n component_wise_values=batch_scalar_loss.unsqueeze(0)\n if batch_scalar_loss.ndim == 0\n else batch_scalar_loss,\n location=MetricLocation.TRAIN,\n ),\n MetricResult(\n name=\"loss\",\n postfix=self.log_name(),\n component_wise_values=batch_scalar_loss_penalty.unsqueeze(0)\n if batch_scalar_loss_penalty.ndim == 0\n else batch_scalar_loss_penalty,\n location=MetricLocation.TRAIN,\n ),\n ]\n\n match component_reduction:\n case LossReductionType.MEAN:\n batch_scalar_loss_penalty = batch_scalar_loss_penalty.mean(0)\n case LossReductionType.SUM:\n batch_scalar_loss_penalty = batch_scalar_loss_penalty.sum(0)\n case LossReductionType.NONE:\n pass\n\n return LossResultWithMetrics(loss=batch_scalar_loss_penalty, loss_metrics=metrics)\n\n def extra_repr(self) -> str:\n \"\"\"Extra representation string.\"\"\"\n return f\"l1_coefficient={self.l1_coefficient}\"" }, { "identifier": "LossReducer", "path": "sparse_autoencoder/loss/reducer.py", "snippet": "class LossReducer(AbstractLoss):\n \"\"\"Loss reducer.\n\n Reduces multiple loss algorithms into a single loss algorithm (by summing). Analogous to\n nn.Sequential.\n\n Example:\n >>> from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss\n >>> from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss\n >>> LossReducer(\n ... L2ReconstructionLoss(),\n ... LearnedActivationsL1Loss(0.001),\n ... 
)\n LossReducer(\n (0): L2ReconstructionLoss()\n (1): LearnedActivationsL1Loss(l1_coefficient=0.001)\n )\n\n \"\"\"\n\n _modules: dict[str, \"AbstractLoss\"]\n \"\"\"Children loss modules.\"\"\"\n\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n return \"total_loss\"\n\n def __init__(\n self,\n *loss_modules: AbstractLoss,\n ):\n \"\"\"Initialize the loss reducer.\n\n Args:\n *loss_modules: Loss modules to reduce.\n\n Raises:\n ValueError: If the loss reducer has no loss modules.\n \"\"\"\n super().__init__()\n\n for idx, loss_module in enumerate(loss_modules):\n self._modules[str(idx)] = loss_module\n\n if len(self) == 0:\n error_message = \"Loss reducer must have at least one loss module.\"\n raise ValueError(error_message)\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Reduce loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Mean loss across the batch, summed across the loss modules.\n \"\"\"\n all_modules_loss: Float[Tensor, \"module train_batch\"] = torch.stack(\n [\n loss_module.forward(source_activations, learned_activations, decoded_activations)\n for loss_module in self._modules.values()\n ]\n )\n\n return all_modules_loss.sum(dim=0)\n\n def __dir__(self) -> list[str]:\n \"\"\"Dir dunder method.\"\"\"\n return list(self._modules.__dir__())\n\n def __getitem__(self, idx: int) -> AbstractLoss:\n \"\"\"Get item dunder method.\"\"\"\n return self._modules[str(idx)]\n\n def __iter__(self) -> Iterator[AbstractLoss]:\n \"\"\"Iterator dunder method.\"\"\"\n return iter(self._modules.values())\n\n def __len__(self) -> int:\n \"\"\"Length dunder method.\"\"\"\n return len(self._modules)" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n 
ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)" } ]
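The snippets above (the L1 sparsity loss, LossReducer, and Axis) already document the intended call pattern. Below is a minimal, self-contained sketch, assuming only that the sparse_autoencoder package referenced in these snippets is importable; the tensor shapes are arbitrary illustration values, not anything prescribed by the repository.

import torch
from jaxtyping import Float
from torch import Tensor

from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer
from sparse_autoencoder.tensor_types import Axis

# Type aliases built with Axis.names, as shown in the Axis docstring.
InputBatch = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]
LearntBatch = Float[Tensor, Axis.names(Axis.BATCH, Axis.LEARNT_FEATURE)]

# Combine an L2 reconstruction loss with an L1 sparsity penalty (coefficient 0.001).
loss_fn = LossReducer(L2ReconstructionLoss(), LearnedActivationsL1Loss(0.001))

source_activations: InputBatch = torch.randn(8, 3)            # input activations
learned_activations: LearntBatch = torch.randn(8, 5).relu()   # latent activations
decoded_activations: InputBatch = torch.randn(8, 3)           # reconstruction

itemwise_loss = loss_fn.forward(
    source_activations, learned_activations, decoded_activations
)
print(itemwise_loss.shape)  # expected: torch.Size([8]), one value per batch item

As the LossReducer snippet shows, forward stacks the per-item losses from each child module and sums across modules, so the result is one combined loss value per batch item.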
from jaxtyping import Float, Int64
from torch import Tensor
from torch.nn import Parameter

from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler
from sparse_autoencoder.activation_store.base_store import ActivationStore
from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore
from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig
from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer
from sparse_autoencoder.tensor_types import Axis
import pytest
import torch
16456
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture()
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture()
def autoencoder_model() -> SparseAutoencoder:
3
2023-10-27 07:37:15+00:00
24k
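Each record therefore pairs retrieved cross-file context snippets with the in-file prefix, the gold next line, and bookkeeping values such as the token count and creation timestamp. Presumably such records are consumed for repository-level next-line code completion; the sketch below shows one plausible way to assemble a prompt and score a prediction. The dictionary keys and the prompt format are assumptions chosen for illustration, not a documented protocol of this dataset.

def build_prompt(record: dict) -> str:
    """Concatenate retrieved snippets with the in-file prefix (hypothetical keys)."""
    retrieved = "\n\n".join(
        "# Path: {}\n{}".format(item["path"], item["snippet"])
        for item in record["context"]
    )
    return "{}\n\n# Path: {}\n{}".format(
        retrieved, record["file_path"], record["cropped_code"]
    )


def exact_match(generation: str, record: dict) -> bool:
    """Score only the first generated line against the gold next line."""
    first_line = (generation.strip().splitlines() or [""])[0]
    return first_line.strip() == record["next_line"].strip()

The gold snippet index seen above presumably marks which retrieved context snippet contains the definition needed to produce the gold line; whether a system feeds the model all snippets or only that one is a retrieval design choice.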
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names_input = self.args.target_config.keys()\n self.target_names = [x for x in self.args.target_config.keys() if self.args.target_config[x][\"in_NPT_loss\"]]\n self.num_targets_input = len(self.target_names_input) #Includes all targets, incl. zero-shot fitness predictions\n self.num_targets = len(self.target_names) #Number of actual targets we want to predict\n self.MSA_sample_sequences = None\n self.training_sample_sequences_indices = None\n self.device = None\n self.optimizer = None\n self.model_type = args.model_type\n self.PNPT_ensemble_test_num_seeds = -1\n self.PNPT_no_reconstruction_error = False\n self.deactivate_col_attention = False\n self.tranception_attention = False\n \n assert self.args.embed_dim % self.args.attention_heads ==0, \"Embedding size {} needs to be a multiple of number of heads {}\".format(self.args.embed_dim, self.args.attention_heads)\n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n self.aa_embedding_dim = self.aa_embedding.embed_tokens.weight.shape[-1]\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = self.alphabet\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.aa_positions_embedding = LearnedPositionalEmbedding(\n self.args.max_positions,\n self.args.embed_dim,\n self.padding_idx,\n )\n self.aa_embedding_dim = self.args.embed_dim\n\n if self.aa_embedding_dim != self.args.embed_dim: #Need to project internally\n self.token_embedding_projection = nn.Linear(\n self.aa_embedding_dim,\n self.args.embed_dim\n )\n self.token_embedding_expansion = nn.Linear(\n self.args.embed_dim,\n self.aa_embedding_dim\n )\n\n self.target_embedding = nn.ModuleDict(\n { \n target_name:\n nn.Linear(\n self.args.target_config[target_name][\"dim\"] + 1, #Need to add one as we append the mask flag to each input target \n self.args.embed_dim\n )\n if self.args.target_config[target_name][\"type\"]==\"continuous\"\n else \n nn.Embedding(\n self.args.target_config[target_name][\"dim\"],\n self.args.embed_dim\n )\n for target_name in self.target_names_input\n }\n )\n \n self.dropout_module = nn.Dropout(self.args.dropout)\n\n self.layers = nn.ModuleList(\n [\n AxialTransformerLayer(\n self.args.embed_dim,\n self.args.ffn_embed_dim,\n self.args.attention_heads,\n self.args.dropout,\n self.args.attention_dropout,\n self.args.activation_dropout,\n getattr(self.args, \"max_tokens_per_msa\", 
self.args.max_tokens_per_msa),\n self.deactivate_col_attention,\n self.tranception_attention,\n self.num_targets_input,\n )\n for _ in range(self.args.num_protein_npt_layers)\n ]\n )\n self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)\n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n weight = self.aa_embedding.embed_tokens.weight\n elif self.args.aa_embeddings == \"Tranception\":\n weight = self.aa_embedding.lm_head.weight\n else:\n weight = self.aa_embedding.weight\n\n self.lm_head = RobertaLMHead(\n embed_dim=self.aa_embedding_dim,\n output_dim=self.alphabet_size,\n weight=weight\n )\n \n target_pred_input_dim = self.args.embed_dim\n\n if args.target_prediction_model==\"MLP\": \n self.layer_pre_head = nn.ModuleDict(\n {\n target_name:\n nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n ) \n for target_name in self.target_names\n }\n )\n \n if args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n \n if args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n \n if self.args.target_prediction_head == \"Target_embeddings_only\":\n target_pred_input_dim = target_pred_input_dim\n elif self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\":\n target_pred_input_dim = target_pred_input_dim * (1 + self.num_targets_input)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1e-4)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n \n def forward(self, tokens, targets=None, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[], need_head_weights=False):\n padding_mask = tokens.eq(self.padding_idx) \n if not padding_mask.any(): padding_mask = None\n \n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size() # N, B, L (seqs with labels, seqs in MSA, seq length)\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size() # N, L (seqs with labels, seq length)\n \n if sequence_embeddings is not None:\n 
x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # N, B, L, D\n x = x[:,0,:,:] # N, L, D. #In each MSA batch the first sequence is what we care about. The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n x = x + self.aa_positions_embedding(tokens.view(batch_size, seqlen)).view(x.size()) # Need position embedding in PNPT since we will apply axial attention\n else:\n print(\"AA embeddings not recognized\")\n sys.exit(0)\n \n if self.aa_embedding_dim != self.args.embed_dim: x = self.token_embedding_projection(x)\n \n if self.args.target_prediction_head != \"Target_embeddings_and_AA_embeddings_mean_pooled\": #We mix AA embeddings pre NPT\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n\n x = x.view(1, batch_size, seqlen, self.args.embed_dim) # 1, N, L, D\n \n #Dimensions for each target (there are self.num_targets of them):\n y = []\n for target_name in self.target_names_input:\n num_sequences_with_target, dim_targets = targets[target_name].shape # N, D_t #In most cases dim_targets = D_t = 2 (original dimension of continuous input + 1 dim for mask)\n y.append(self.target_embedding[target_name](targets[target_name]).view(num_sequences_with_target,1,self.args.embed_dim))\n y = torch.cat(y, dim=-2) #concatenate across second to last dimension # N, num_targets, D\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim), \"Error in y shape: {}\".format(y.shape)\n y = y.view(1, num_sequences_with_target, self.num_targets_input, self.args.embed_dim) # 1, N, num_targets, D\n \n #Concatenate AA tokens and targets\n x = torch.cat((x,y),dim=-2) # 1, N, (L+num_targets), D\n x = self.emb_layer_norm_before(x)\n x = self.dropout_module(x)\n\n if padding_mask is not None:\n padding_mask_with_targets = torch.zeros(num_MSAs_in_batch, num_sequences_in_alignments, seqlen + self.num_targets_input)\n padding_mask_with_targets[...,:seqlen] = padding_mask\n padding_mask = padding_mask_with_targets\n x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers: hidden_representations[0] = x\n if need_head_weights:\n row_attn_weights = []\n col_attn_weights = []\n\n # 1 x N x L x D -> N x L x 1 x D\n x = x.permute(1, 2, 0, 3)\n for layer_idx, layer in enumerate(self.layers):\n x = layer(\n x,\n self_attn_padding_mask=padding_mask,\n need_head_weights=need_head_weights,\n )\n if need_head_weights:\n x, col_attn, row_attn = x\n col_attn_weights.append(col_attn.permute(2, 0, 1, 3, 4).cpu())\n row_attn_weights.append(row_attn.permute(1, 0, 2, 3).cpu())\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.permute(2, 0, 1, 3)\n x = self.emb_layer_norm_after(x)\n x = x.permute(2, 0, 1, 3) # N x L x 1 x D -> 1 x N x L x D\n assert x.shape == (1, num_sequences_with_target, seqlen + 
self.num_targets_input, self.args.embed_dim), \"Error with axial transformer\"\n # last hidden representation should have layer norm applied\n if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x\n \n # Loss over NPT MLM objective\n if self.aa_embedding_dim != self.args.embed_dim:\n logits_protein_sequence = self.lm_head(self.token_embedding_expansion(x[...,:seqlen,:]))\n else:\n logits_protein_sequence = self.lm_head(x[...,:seqlen,:]) #Remove dependency on targets for final AA predictions. logits size: (1, N, L, Vocab)\n \n x = x.view(num_sequences_with_target, seqlen + self.num_targets_input, self.args.embed_dim)\n x, y = x[:,:seqlen,:], x[:,seqlen:,:] # (N,L,D) and (N,num_targets,D)\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim)\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n x = x.mean(dim=-2) # N, D\n y = y.view(num_sequences_with_target,self.num_targets_input * self.args.embed_dim)\n y = torch.cat((x,y),dim=-1) # N, (1+num_targets) * D\n \n target_predictions = {}\n for target_index, target_name in enumerate(self.target_names):\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n target_predictions[target_name] = self.target_pred_head[target_name](y).view(-1) #We use the concatenated X and target embeddings (all of them) to predict each target\n else:\n if self.args.target_prediction_model == \"MLP\": y[:,target_index,:] = self.layer_pre_head[target_name](y[:,target_index,:])\n target_predictions[target_name] = self.target_pred_head[target_name](y[:,target_index,:]).view(-1) #input the embedding with the relevant target_index\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n \n result = {\"logits_protein_sequence\": logits_protein_sequence, \"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n if need_head_weights:\n col_attentions = torch.stack(col_attn_weights, 1)\n row_attentions = torch.stack(row_attn_weights, 1)\n result[\"col_attentions\"] = col_attentions\n result[\"row_attentions\"] = row_attentions\n\n return result\n\n def forward_with_uncertainty(self, tokens, targets, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10, number_of_mutated_seqs_to_score=None):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output)\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad():\n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, targets, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n \n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def protein_npt_loss(self, token_predictions_logits, token_labels, target_predictions, target_labels, MLM_reconstruction_loss_weight, label_smoothing=0.0):\n target_prediction_loss_weight = 1.0 - MLM_reconstruction_loss_weight\n total_loss = 0.0\n if (token_labels is not None) and (MLM_reconstruction_loss_weight > 0.0):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None: token_labels = token_labels[:,0,:] #Only keep the token labels for seq to score. Drops the token labels for MSA sequences\n masked_lm_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(token_predictions_logits.reshape(-1, self.alphabet_size), token_labels.reshape(-1))\n reconstruction_loss = masked_lm_loss\n total_loss += MLM_reconstruction_loss_weight * reconstruction_loss\n else:\n reconstruction_loss = torch.tensor(0.0)\n target_prediction_loss = {}\n for target_name in self.target_names:\n if self.args.target_config[target_name][\"in_NPT_loss\"]:\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n loss_masked_targets = ~target_labels[target_name].eq(-100) #Masked items are the ones for which the label was not set to -100\n if loss_masked_targets.sum()==0 or torch.isnan(target_labels[target_name][loss_masked_targets]).sum() > 0: #First condition true if we dont mask anything (eg., all target missing at eval). 
Second condition true if we force-mask one value at train time (to satisfy min_num_labels_masked in mast_target()) and corresponding target value is missing\n tgt_loss = torch.tensor(0.0)\n else:\n tgt_loss = MSELoss(reduction=\"mean\")(target_predictions[target_name][loss_masked_targets], target_labels[target_name][loss_masked_targets]) #we do not average the loss per batch, so that it's easier to do 1 full average across all batches\n if torch.isnan(tgt_loss).sum() > 0:\n print(\"Detected nan loss\")\n print(target_predictions[target_name])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1)) # Note: we dont add one to the # of categories in the CE loss here (we dont predict <mask>)\n target_prediction_loss[target_name] = tgt_loss\n \n total_loss += target_prediction_loss_weight * target_prediction_loss[target_name]\n return total_loss, reconstruction_loss, target_prediction_loss\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n if self.optimizer is None:\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "AugmentedPropertyPredictor", "path": "baselines/model.py", "snippet": "class AugmentedPropertyPredictor(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n print(\"Alphabet: {}\".format(alphabet))\n print(\"Alphabet size: {}\".format(self.alphabet_size))\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names = self.args.target_config.keys() \n self.MSA_sample_sequences = None \n self.device = None\n self.model_type = args.model_type \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n if 
self.args.aa_embeddings == \"MSA_Transformer\": self.args.seq_len = self.args.MSA_seq_len #If MSA does not cover full sequence length, we adjust seq_len param to be MSA_len (sequences truncated as needed in preprocessing)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Sequential(\n nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n ),\n nn.ReLU()\n )\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n self.args.target_prediction_head == \"One_hot_encoding\"\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = get_tranception_tokenizer()\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n self.config = config\n else:\n print(\"Error: Specified AA embedding invalid\")\n sys.exit(0)\n\n if self.args.aa_embeddings != \"One_hot_encoding\": \n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n self.dropout_module = nn.Dropout(self.args.dropout)\n\n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\":\n target_pred_input_dim = self.args.embed_dim\n elif self.args.target_prediction_head == \"One_hot_encoding\":\n target_pred_input_dim = (self.args.seq_len + 1) * self.alphabet_size if args.target_prediction_model!=\"CNN\" else self.alphabet_size #Add one for the BOS token\n else:\n print(self.args.target_prediction_head)\n print(\"Error: Specified embedding aggregation invalid\")\n sys.exit(0)\n \n if args.target_prediction_model==\"MLP\":\n self.layer_pre_head = nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n elif args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads if self.args.attention_heads is not None else 4,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n elif args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n target_pred_input_dim = target_pred_input_dim if self.args.target_prediction_head != \"One_hot_encoding\" else target_pred_input_dim * (self.args.seq_len + 1)\n elif args.target_prediction_model==\"light_attention\":\n # Adapted from Stark et al (https://github.com/HannesStark/protein-localization)\n self.feature_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.attention_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(self.args.dropout)\n self.linear = nn.Sequential(\n nn.Linear(2 * self.args.embed_dim, 32),\n nn.Dropout(self.args.dropout),\n nn.ReLU(),\n nn.BatchNorm1d(32)\n )\n 
target_pred_input_dim = 32\n elif args.target_prediction_model==\"linear\":\n pass\n else:\n print(\"Error: Specified layer_pre_head invalid\")\n sys.exit(0)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1.0)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names #If multiple targets, we learn a separate linear head for each separately\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n\n def forward(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[]):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size()\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size()\n \n if sequence_embeddings is not None:\n x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # B, N, L, D\n x = x[:,0,:,:] #In each MSA batch the first sequence is what we care about. 
The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings == \"Tranception\":\n processed_batch = {'input_ids': tokens, 'labels': tokens}\n output = self.aa_embedding(**processed_batch, return_dict=True, output_hidden_states=True)\n x = output.hidden_states[0]\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n x = nn.functional.one_hot(tokens, num_classes=self.alphabet_size).view(batch_size,-1).float()\n if self.args.target_prediction_model == \"CNN\": x = x.view(batch_size,seqlen,self.alphabet_size)\n\n if self.args.aa_embeddings != \"One_hot_encoding\":\n x = self.emb_layer_norm_after(x)\n x = self.dropout_module(x)\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n elif self.args.target_prediction_model==\"light_attention\":\n x = x.permute(0,2,1) #N, D, L\n o = self.feature_convolution(x) \n o = self.dropout(o)\n attention = self.attention_convolution(x)\n o1 = torch.sum(o * self.softmax(attention), dim=-1)\n o2, _ = torch.max(o, dim=-1)\n o = torch.cat([o1, o2], dim=-1)\n x = self.linear(o)\n \n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\": x = x.mean(dim=-2)\n \n if self.args.target_prediction_model == \"MLP\": x = self.layer_pre_head(x)\n \n target_predictions = {}\n for target_name in self.target_names:\n target_predictions[target_name] = self.target_pred_head[target_name](x).view(-1)\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n\n result = {\"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n return result\n \n def forward_with_uncertainty(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output).\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad(): \n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n\n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def prediction_loss(self, target_predictions, target_labels, label_smoothing=0.1):\n total_target_prediction_loss = 0.0\n target_prediction_loss_dict = {}\n for target_name in self.target_names:\n non_missing_target_indicator = ~torch.isnan(target_labels[target_name])\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n tgt_loss = MSELoss(reduction=\"sum\")(target_predictions[target_name][non_missing_target_indicator], target_labels[target_name][non_missing_target_indicator])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"none\",label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1))\n target_prediction_loss_dict[target_name] = tgt_loss\n total_target_prediction_loss += tgt_loss\n return total_target_prediction_loss, target_prediction_loss_dict\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "Alphabet", "path": "utils/esm/data.py", "snippet": "class Alphabet(object):\n def __init__(\n self,\n standard_toks: Sequence[str],\n prepend_toks: Sequence[str] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\"),\n append_toks: Sequence[str] = (\"<cls>\", \"<mask>\", \"<sep>\"),\n prepend_bos: bool = True,\n append_eos: bool = False,\n use_msa: bool = False,\n ):\n #ESM Alphabet: {'<cls>': 0, '<pad>': 1, '<eos>': 2, '<unk>': 3, 'L': 4, 'A': 5, 'G': 6, 'V': 7, 'S': 8, 'E': 9, 'R': 10, 'T': 11, 'I': 12, 'D': 13, 'P': 14, 'K': 15, 'Q': 16, 'N': 17, 'F': 18, 'Y': 19, 'M': 20, 'H': 21, 'W': 22, 'C': 23, 'X': 24, 'B': 25, 'U': 26, 'Z': 27, 'O': 28, '.': 29, '-': 30, '<null_1>': 31, '<mask>': 32}\n self.standard_toks = list(standard_toks)\n self.prepend_toks = list(prepend_toks)\n self.append_toks = list(append_toks)\n self.prepend_bos = prepend_bos\n self.append_eos = append_eos\n self.use_msa = use_msa\n\n self.all_toks = list(self.prepend_toks)\n self.all_toks.extend(self.standard_toks)\n for i in range((8 - (len(self.all_toks) % 8)) % 8):\n self.all_toks.append(f\"<null_{i + 1}>\")\n self.all_toks.extend(self.append_toks)\n\n self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)}\n\n self.unk_idx = self.tok_to_idx[\"<unk>\"]\n self.padding_idx = self.get_idx(\"<pad>\")\n self.cls_idx = self.get_idx(\"<cls>\")\n self.mask_idx = self.get_idx(\"<mask>\")\n self.eos_idx = self.get_idx(\"<eos>\")\n self.all_special_tokens = ['<eos>', '<unk>', '<pad>', '<cls>', '<mask>']\n self.unique_no_split_tokens = self.all_toks\n\n def __len__(self):\n return len(self.all_toks)\n\n def get_idx(self, tok):\n return self.tok_to_idx.get(tok, self.unk_idx)\n\n def get_tok(self, ind):\n return self.all_toks[ind]\n\n def to_dict(self):\n return self.tok_to_idx.copy()\n\n def get_batch_converter(self, truncation_seq_length: int = None):\n if self.use_msa:\n return MSABatchConverter(self, truncation_seq_length)\n else:\n return BatchConverter(self, truncation_seq_length)\n\n @classmethod\n def from_architecture(cls, name: str) -> \"Alphabet\":\n if name in (\"ESM-1\", \"protein_bert_base\"):\n standard_toks = 
proteinseq_toks[\"toks\"]\n prepend_toks: Tuple[str, ...] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks: Tuple[str, ...] = (\"<cls>\", \"<mask>\", \"<sep>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n elif name in (\"ESM-1b\", \"roberta_large\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = True\n use_msa = False\n elif name in (\"MSA Transformer\", \"msa_transformer\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n elif \"invariant_gvp\" in name.lower():\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\", \"<cath>\", \"<af2>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa)\n\n def _tokenize(self, text) -> str:\n return text.split()\n\n def tokenize(self, text, **kwargs) -> List[str]:\n \"\"\"\n Inspired by https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py\n Converts a string in a sequence of tokens, using the tokenizer.\n\n Args:\n text (:obj:`str`):\n The sequence to be encoded.\n\n Returns:\n :obj:`List[str]`: The list of tokens.\n \"\"\"\n\n def split_on_token(tok, text):\n result = []\n split_text = text.split(tok)\n for i, sub_text in enumerate(split_text):\n # AddedToken can control whitespace stripping around them.\n # We use them for GPT2 and Roberta to have different behavior depending on the special token\n # Cf. 
https://github.com/huggingface/transformers/pull/2778\n # and https://github.com/huggingface/transformers/issues/3788\n # We strip left and right by default\n if i < len(split_text) - 1:\n sub_text = sub_text.rstrip()\n if i > 0:\n sub_text = sub_text.lstrip()\n\n if i == 0 and not sub_text:\n result.append(tok)\n elif i == len(split_text) - 1:\n if sub_text:\n result.append(sub_text)\n else:\n pass\n else:\n if sub_text:\n result.append(sub_text)\n result.append(tok)\n return result\n\n def split_on_tokens(tok_list, text):\n if not text.strip():\n return []\n\n tokenized_text = []\n text_list = [text]\n for tok in tok_list:\n tokenized_text = []\n for sub_text in text_list:\n if sub_text not in self.unique_no_split_tokens:\n tokenized_text.extend(split_on_token(tok, sub_text))\n else:\n tokenized_text.append(sub_text)\n text_list = tokenized_text\n\n return list(\n itertools.chain.from_iterable(\n (\n self._tokenize(token)\n if token not in self.unique_no_split_tokens\n else [token]\n for token in tokenized_text\n )\n )\n )\n\n no_split_token = self.unique_no_split_tokens\n tokenized_text = split_on_tokens(no_split_token, text)\n return tokenized_text\n\n def encode(self, text):\n return [self.tok_to_idx[tok] for tok in self.tokenize(text)]" }, { "identifier": "get_tranception_tokenizer", "path": "utils/tranception/model_pytorch.py", "snippet": "def get_tranception_tokenizer():\n #Tranception Alphabet: \"vocab\":{\"[UNK]\":0,\"[CLS]\":1,\"[SEP]\":2,\"[PAD]\":3,\"[MASK]\":4,\"A\":5,\"C\":6,\"D\":7,\"E\":8,\"F\":9,\"G\":10,\"H\":11,\"I\":12,\"K\":13,\"L\":14,\"M\":15,\"N\":16,\"P\":17,\"Q\":18,\"R\":19,\"S\":20,\"T\":21,\"V\":22,\"W\":23,\"Y\":24}\n dir_path = os.path.dirname(os.path.abspath(__file__))\n tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path + os.sep + \"utils/tokenizers/Basic_tokenizer\", unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",mask_token=\"[MASK]\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n tokenizer.tok_to_idx = tokenizer.vocab\n tokenizer.padding_idx = tokenizer.tok_to_idx[\"[PAD]\"]\n tokenizer.mask_idx = tokenizer.tok_to_idx[\"[MASK]\"]\n tokenizer.cls_idx = tokenizer.tok_to_idx[\"[CLS]\"]\n tokenizer.eos_idx = tokenizer.tok_to_idx[\"[SEP]\"]\n tokenizer.prepend_bos = True\n tokenizer.append_eos = True\n return tokenizer" }, { "identifier": "get_train_val_test_data", "path": "utils/data_utils.py", "snippet": "def get_train_val_test_data(args, assay_file_names):\n target_names = args.target_config.keys() \n assay_data={}\n merge = None\n main_target_name = None\n main_target_name_count = 0\n for target in target_names:\n if args.target_config[target][\"main_target\"]: \n main_target_name=target\n main_target_name_count+=1\n assert main_target_name is not None, \"No main target referenced. Please update config to select a unique main target.\"\n assert main_target_name_count <= 1, \"Several main targets referenced. 
Please update config to select a unique main target.\"\n \n assay_data[main_target_name] = pd.read_csv(args.target_config[main_target_name][\"location\"] + os.sep + assay_file_names[main_target_name])[['mutant','mutated_sequence',args.target_config[main_target_name][\"var_name\"],args.fold_variable_name]] \n assay_data[main_target_name].columns = ['mutant','mutated_sequence', main_target_name, args.fold_variable_name]\n merge = assay_data[main_target_name]\n \n for target_name in target_names:\n if target_name!=main_target_name:\n print(target_name)\n print(args.target_config)\n print(assay_file_names)\n assay_data[target_name] = pd.read_csv(args.target_config[target_name][\"location\"] + os.sep + assay_file_names[target_name])[['mutant',args.target_config[target_name][\"var_name\"]]] \n assay_data[target_name].columns = ['mutant',target_name]\n merge = pd.merge(merge, assay_data[target_name], how='left', on='mutant')\n \n if args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = pd.read_csv(args.zero_shot_fitness_predictions_location + os.sep + assay_file_names[main_target_name])[['mutant',args.zero_shot_fitness_predictions_var_name]]\n zero_shot_fitness_predictions.columns = ['mutant','zero_shot_fitness_predictions']\n zero_shot_fitness_predictions['zero_shot_fitness_predictions'] = standardize(zero_shot_fitness_predictions['zero_shot_fitness_predictions'])\n merge = pd.merge(merge,zero_shot_fitness_predictions,how='inner',on='mutant')\n\n train_val_test_splits = split_data_based_on_test_fold_index(\n dataframe = merge, \n fold_variable_name = args.fold_variable_name,\n test_fold_index = args.test_fold_index,\n use_validation_set = args.use_validation_set\n )\n splits_dict = {}\n for split_name, split in zip(['train','val','test'], train_val_test_splits):\n if split_name=='val' and not args.use_validation_set: continue\n splits_dict[split_name] = {}\n splits_dict[split_name]['mutant_mutated_seq_pairs'] = list(zip(list(split['mutant']),list(split['mutated_sequence'])))\n raw_targets = {target_name: split[target_name] for target_name in target_names}\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": raw_targets['zero_shot_fitness_predictions'] = split['zero_shot_fitness_predictions']\n if split_name==\"train\":\n raw_targets, target_processing = preprocess_training_targets(raw_targets, args.target_config)\n else:\n raw_targets = preprocess_test_targets(raw_targets, args.target_config, target_processing)\n for target_name in target_names: \n splits_dict[split_name][target_name] = raw_targets[target_name]\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": splits_dict[split_name]['zero_shot_fitness_predictions'] = raw_targets['zero_shot_fitness_predictions']\n # load dict into dataset objects\n train_data = Dataset.from_dict(splits_dict['train'])\n val_data = Dataset.from_dict(splits_dict['val']) if args.use_validation_set else None\n test_data = Dataset.from_dict(splits_dict['test'])\n return train_data, val_data, test_data, target_processing" }, { "identifier": "standardize", "path": "utils/data_utils.py", "snippet": "def standardize(x):\n return (x - x.mean()) / x.std()" }, { "identifier": "pnpt_count_non_nan", "path": "utils/data_utils.py", "snippet": "def pnpt_count_non_nan(x):\n missing_mask = np.isnan(x) | np.equal(x,-100)\n return np.count_nonzero(~missing_mask)" }, { "identifier": "pnpt_spearmanr", "path": "utils/data_utils.py", "snippet": "def pnpt_spearmanr(prediction,target):\n mask_missing_values = 
np.isnan(target) | np.equal(target, -100) #In PNPT missing values are never masked so corresponding labels are always set to -100\n return spearmanr(prediction[~mask_missing_values], target[~mask_missing_values])[0] #first value is spearman rho, second is the corresponding p-value " }, { "identifier": "process_MSA", "path": "utils/msa_utils.py", "snippet": "def process_MSA(args, MSA_filename, MSA_weights_filename):\n filtered_MSA_filename = filter_msa(filename = args.MSA_data_folder + os.sep + MSA_filename, path_to_hhfilter = args.path_to_hhfilter)\n MSA_all_sequences, MSA_non_ref_sequences_weights = compute_sequence_weights(MSA_filename = filtered_MSA_filename, MSA_weights_filename = args.MSA_weight_data_folder + os.sep + MSA_weights_filename)\n return MSA_all_sequences, MSA_non_ref_sequences_weights" }, { "identifier": "Trainer", "path": "utils/model_utils.py", "snippet": "class Trainer():\n def __init__(self, \n model,\n args,\n train_data, \n val_data,\n MSA_sequences, \n MSA_weights,\n MSA_start_position,\n MSA_end_position,\n target_processing,\n distributed_training=False\n ):\n self.model = model\n self.args = args\n self.train_data = train_data\n self.val_data = val_data\n self.MSA_sequences = MSA_sequences\n self.MSA_weights = MSA_weights\n self.MSA_start_position = MSA_start_position\n self.MSA_end_position = MSA_end_position\n self.target_processing = target_processing\n self.distributed_training = distributed_training\n \n def train(self):\n \"\"\"\n Returns the last value of training_step (useful in case of early stopping for isntance)\n \"\"\"\n \n self.model.train()\n self.model.cuda()\n self.model.set_device()\n\n if self.distributed_training:\n self.model = torch.nn.parallel.DistributedDataParallel(self.model)\n train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_data)\n else:\n train_sampler = None\n \n #To ensure reproducibility with seed setting\n def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n g = torch.Generator()\n g.manual_seed(0)\n train_loader = torch.utils.data.DataLoader(\n dataset=self.train_data, \n batch_size=self.args.training_num_assay_sequences_per_batch_per_gpu, \n shuffle=(train_sampler is None),\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True, \n sampler=train_sampler,\n collate_fn=collate_fn_protein_npt,\n worker_init_fn=seed_worker,\n generator=g,\n )\n optimizer = self.model.create_optimizer()\n scheduler = learning_rate_scheduler(\n num_warmup_steps=self.args.num_warmup_steps, \n num_total_training_steps=self.args.num_total_training_steps, \n max_learning_rate=self.args.max_learning_rate, \n min_learning_rate=self.args.min_learning_rate\n )\n \n train_iterator = iter(train_loader)\n num_epochs = 0\n prior_log_time = time.time()\n total_train_time = 0\n log_train_total_loss = 0\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n all_spearmans_eval_during_training = []\n max_average_spearman_across_targets = - math.inf\n if self.args.training_fp16: scaler = torch.cuda.amp.GradScaler()\n\n for training_step in tqdm.tqdm(range(1, self.args.num_total_training_steps+1)):\n optimizer.zero_grad(set_to_none=True)\n lr = scheduler(training_step)\n update_lr_optimizer(optimizer, lr)\n reconstruction_loss_coeff = 
get_reconstruction_loss_coefficient(training_step, num_total_training_steps=self.args.num_total_training_steps) if (self.model.model_type==\"ProteinNPT\" and not self.model.PNPT_no_reconstruction_error) else 0\n for gradient_accum_step in range(self.args.gradient_accumulation):\n try:\n batch = next(train_iterator)\n except:\n num_epochs +=1\n train_iterator = iter(train_loader)\n batch = next(train_iterator)\n \n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = None,\n proba_target_mask = 0.15,\n proba_aa_mask = 0.15,\n eval_mode = False,\n device=self.model.device,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=False,\n indel_mode=self.args.indel_mode\n )\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n scaler.scale(total_loss).backward()\n else:\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n 
MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n if total_loss.item() > 10.0 and training_step >= 100:\n print(\"High training loss detected: {}\".format(total_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_norm_clip)\n # Taking optimizer update out of the inner loop to support gradient accumulation\n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n log_train_total_loss += total_loss\n for target_name in self.model.target_names:\n log_train_target_prediction_loss_dict[target_name] += target_prediction_loss_dict[target_name]\n if self.model.model_type==\"ProteinNPT\": \n log_train_reconstruction_loss += reconstruction_loss\n log_train_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum()\n for target_name in self.model.target_names:\n log_train_num_target_masked_tokens_dict[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item() # Masked targets are encoded by 1.0. Mask column is the very last one\n else:\n log_num_sequences_predicted += len(batch['mutant_mutated_seq_pairs'])\n \n if training_step % self.args.num_logging_training_steps == 0 and self.args.use_wandb:\n time_end_step = time.time()\n delta_time_since_last_log = time_end_step - prior_log_time\n total_train_time += delta_time_since_last_log\n prior_log_time = time_end_step\n train_logs = {\n \"training_step\": training_step, \n \"step_time\": delta_time_since_last_log / (self.args.num_logging_training_steps)\n }\n if self.model.model_type==\"ProteinNPT\": \n train_logs[\"train_total_loss_per_step\"]: log_train_total_loss / self.args.num_logging_training_steps\n train_logs[\"train_reconstruction_loss_per_masked_token\"] = log_train_reconstruction_loss / log_train_num_masked_tokens\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_masked_token\"] = log_train_target_prediction_loss_dict[target_name] / log_train_num_target_masked_tokens_dict[target_name]\n else:\n train_logs[\"train_total_loss_per_seq\"]: log_train_total_loss / log_num_sequences_predicted\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_seq\"] = log_train_target_prediction_loss_dict[target_name] / log_num_sequences_predicted\n wandb.log(train_logs)\n log_train_total_loss = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0 \n \n if self.args.save_model_checkpoint and (training_step % self.args.num_saving_training_steps) == 0:\n if not os.path.exists(self.args.model_location): os.mkdir(self.args.model_location)\n if not os.path.exists(self.args.model_location + os.sep + 'checkpoint-'+str(training_step)): 
os.mkdir(self.args.model_location + os.sep + 'checkpoint-'+str(training_step))\n torch.save({\n 'training_step': training_step,\n 'args': self.args,\n 'state_dict': self.model.state_dict(),\n 'optimizer' : optimizer.state_dict()\n }, \n self.args.model_location + os.sep + 'checkpoint-'+str(training_step) + os.sep + 'checkpoint.t7'\n )\n \n if training_step % self.args.num_eval_steps == 0 and self.args.use_validation_set:\n if self.model.model_type==\"ProteinNPT\":\n eval_results = self.eval(\n test_data=self.val_data,\n train_data=self.train_data,\n reconstruction_loss_weight=0.0,\n output_all_predictions=True\n )\n else:\n eval_results = self.eval(\n test_data=self.val_data, \n output_all_predictions=True\n )\n eval_logs = {\"Training step\": training_step} \n if self.model.model_type==\"ProteinNPT\":\n normalization = 0\n for target_name in self.model.target_names: normalization += eval_results['eval_num_masked_targets'][target_name]\n else:\n normalization = eval_results['eval_num_predicted_targets']\n eval_logs['Eval total loss per seq.']: eval_results['eval_total_loss'] / normalization\n average_spearman_across_targets = 0 #If early stopping based on validation spearman and multiple targets, we check that avg spearman is not decreasing for a certain # of times in a row\n for target_name in self.model.target_names:\n if self.model.model_type==\"ProteinNPT\": normalization = eval_results['eval_num_masked_targets'][target_name] #Update for PNPT (keeep the same normalization constant otherwise)\n eval_logs['Eval loss '+str(target_name)+' per seq.'] = eval_results['eval_target_prediction_loss_dict'][target_name] / normalization\n eval_logs['Eval spearman '+target_name] = spearmanr(eval_results['output_scores']['predictions_'+target_name], eval_results['output_scores']['labels_'+target_name])[0]\n average_spearman_across_targets += eval_logs['Eval spearman '+target_name]\n average_spearman_across_targets /= len(self.model.target_names)\n print(\" | \".join([key + \": \"+str(round(eval_logs[key],5)) for key in eval_logs.keys()]))\n if self.args.use_wandb: wandb.log(eval_logs)\n # Early stopping\n all_spearmans_eval_during_training.append(average_spearman_across_targets)\n if average_spearman_across_targets > max_average_spearman_across_targets: max_average_spearman_across_targets = average_spearman_across_targets\n if (training_step >= 1000) and (self.args.early_stopping_patience is not None) and (np.array(all_spearmans_eval_during_training)[-self.args.early_stopping_patience:].max() < max_average_spearman_across_targets):\n print(\"Early stopping. Training step: {}. Total eval loss: {}. 
Avg spearman: {}\".format(training_step, eval_results['eval_total_loss'], average_spearman_across_targets))\n break\n self.model.train() #Move back the model to train mode after eval loop\n trainer_final_status = {\n 'total_training_steps': training_step,\n 'total_train_time': total_train_time,\n 'total_training_epochs': num_epochs\n }\n return trainer_final_status\n\n def eval(self, test_data, output_all_predictions=False, need_head_weights=False, train_data = None, reconstruction_loss_weight=0.5, selected_indices_seed=0):\n \"\"\"\n total_eval_target_prediction_loss is the sum of all target prediction losses across all targets\n total_eval_target_prediction_loss contains the breakdown by target\n num_predicted_targets has the number of predicted items\n output_scores is a dict with sequences, predictions and labels\n \"\"\"\n self.model.eval()\n self.model.cuda()\n self.model.set_device()\n with torch.no_grad():\n eval_loader = torch.utils.data.DataLoader(\n dataset=test_data, \n batch_size=self.args.eval_num_sequences_to_score_per_batch_per_gpu, \n shuffle=False,\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True,\n collate_fn=collate_fn_protein_npt\n )\n eval_iterator = iter(eval_loader)\n \n eval_total_loss = 0\n if self.model.model_type==\"ProteinNPT\": \n eval_reconstruction_loss = 0\n eval_num_masked_tokens = 0\n eval_num_masked_targets = defaultdict(int)\n else:\n num_predicted_targets = 0\n eval_target_prediction_loss_dict = defaultdict(int)\n output_scores = defaultdict(list) if output_all_predictions else None\n\n if need_head_weights:\n col_attentions=[]\n row_attentions=[]\n\n for batch in tqdm.tqdm(eval_iterator):\n if output_all_predictions: \n output_scores['mutated_sequence'] += list(zip(*batch['mutant_mutated_seq_pairs']))[1]\n output_scores['mutant'] += list(zip(*batch['mutant_mutated_seq_pairs']))[0]\n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = train_data,\n proba_target_mask = 1.0, \n proba_aa_mask = 0.0,\n eval_mode = True,\n device=self.model.device,\n selected_indices_seed=selected_indices_seed,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=True,\n indel_mode=self.args.indel_mode\n )\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings'],\n need_head_weights=need_head_weights\n )\n 
batch_loss, batch_reconstruction_loss, batch_target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_weight, \n label_smoothing=self.args.label_smoothing\n )\n if batch_loss.item() > 10.0:\n print(\"High eval loss detected: {}\".format(batch_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n batch_loss, batch_target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n \n eval_total_loss += batch_loss.item()\n for target_name in self.model.target_names:\n eval_target_prediction_loss_dict[target_name] += batch_target_prediction_loss_dict[target_name].item()\n if self.model.model_type==\"ProteinNPT\":\n eval_reconstruction_loss += batch_reconstruction_loss.item()\n eval_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum().item()\n for target_name in self.model.target_names:\n eval_num_masked_targets[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item()\n else:\n num_predicted_targets += len(batch['mutant_mutated_seq_pairs'])\n if output_all_predictions:\n num_of_mutated_seqs_to_score = processed_batch['num_of_mutated_seqs_to_score'] if self.model.model_type==\"ProteinNPT\" else len(processed_batch['mutant_mutated_seq_pairs'])\n for target_name in self.model.target_names:\n output_scores['predictions_'+target_name] += list(output[\"target_predictions\"][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n output_scores['labels_'+target_name] += list(processed_batch['target_labels'][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n if need_head_weights:\n col_attentions.append(output[\"col_attentions\"])\n row_attentions.append(output[\"row_attentions\"])\n\n output_scores = pd.DataFrame.from_dict(output_scores)\n output_scores_numeric_cols = [col_name for col_name in output_scores.columns if col_name not in ['mutant','mutated_sequence']]\n output_scores = output_scores.groupby(['mutant'])[output_scores_numeric_cols].mean().reset_index() \n mutated_seqs_dict = {}\n mutant_mutated_seqs = list(zip(*test_data['mutant_mutated_seq_pairs']))\n mutated_seqs_dict['mutant'] = mutant_mutated_seqs[0]\n mutated_seqs_dict['mutated_sequence'] = mutant_mutated_seqs[1]\n mutated_seqs_df = pd.DataFrame.from_dict(mutated_seqs_dict)\n output_scores = pd.merge(output_scores, mutated_seqs_df, on='mutant', how='left')\n \n\n eval_results = {\n 'eval_total_loss':eval_total_loss,\n 'eval_target_prediction_loss_dict':eval_target_prediction_loss_dict,\n 'output_scores': output_scores\n }\n if need_head_weights:\n print(\"dimension of first attention column {}\".format(col_attentions[0].shape))\n eval_results['col_attentions'] = torch.stack(col_attentions, dim=0).cpu().numpy()\n eval_results['row_attentions'] = torch.stack(row_attentions, dim=0).cpu().numpy()\n \n if self.model.model_type==\"ProteinNPT\":\n eval_results['eval_reconstruction_loss']=eval_reconstruction_loss\n eval_results['eval_num_masked_tokens']=eval_num_masked_tokens\n 
eval_results['eval_num_masked_targets']=eval_num_masked_targets\n else:\n eval_results['eval_num_predicted_targets']=num_predicted_targets\n return eval_results" } ]
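The training loop in the Trainer snippet above wraps `scaler.step(optimizer)` / `scaler.update()` in `torch.cuda.amp.autocast()` after an unscaled `total_loss.backward()`, and several logging lines use `train_logs[...]: value` (an annotation, not an assignment, so those keys are presumably never set). For reference, a minimal, self-contained sketch of the canonical `torch.cuda.amp` step; the model, optimizer, loss function and batch names are placeholders, not objects from the repository:

```python
import torch

scaler = torch.cuda.amp.GradScaler()

def training_step(model, optimizer, loss_fn, batch, grad_norm_clip=1.0):
    # Canonical mixed-precision recipe: autocast wraps the forward pass only,
    # the loss is scaled before backward, and gradients are unscaled before clipping.
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        predictions = model(batch["inputs"])
        loss = loss_fn(predictions, batch["labels"])
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)
    torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm_clip)
    scaler.step(optimizer)   # skipped automatically if inf/NaN gradients are detected
    scaler.update()
    train_logs = {"train_total_loss_per_step": loss.item()}  # note `=`, not `:`
    return train_logs
```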
import os,gc import json import argparse import random import numpy as np import pandas as pd import wandb import torch import proteinnpt,baselines,utils from collections import defaultdict from proteinnpt.model import ProteinNPTModel from baselines.model import AugmentedPropertyPredictor from utils.esm.data import Alphabet from utils.tranception.model_pytorch import get_tranception_tokenizer from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr from utils.msa_utils import process_MSA from utils.model_utils import Trainer
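Among the imports above, `pnpt_spearmanr` and `pnpt_count_non_nan` come from `utils.data_utils`. Their implementation is not shown here; judging from the masking logic later in the script (missing labels encoded as NaN or -100), a NaN-tolerant Spearman along the following lines would behave equivalently. The function names and the missing-value convention are assumptions for illustration only:

```python
import numpy as np
from scipy.stats import spearmanr

def spearman_ignoring_missing(predictions, labels, missing_value=-100):
    # Drop entries whose label is NaN or the missing-value sentinel, then rank-correlate.
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    observed = ~(np.isnan(labels) | np.equal(labels, missing_value))
    if observed.sum() < 2:
        return float("nan")  # Spearman is undefined with fewer than two observations
    return spearmanr(predictions[observed], labels[observed]).correlation

def count_non_missing(labels, missing_value=-100):
    labels = np.asarray(labels, dtype=float)
    return int((~(np.isnan(labels) | np.equal(labels, missing_value))).sum())
```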
20,587
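The validation loop in the Trainer snippet stops training once the best average Spearman over the last `early_stopping_patience` evaluations falls short of the running maximum (and a minimum number of steps has elapsed). The same rule restated compactly, with illustrative names:

```python
import numpy as np

def should_stop_early(spearman_history, patience, min_steps_reached=True):
    # Stop when none of the last `patience` evaluations matched or beat the best value so far.
    if patience is None or not min_steps_reached or len(spearman_history) < patience:
        return False
    best_so_far = max(spearman_history)
    return np.array(spearman_history)[-patience:].max() < best_so_far

# The best value (0.72) predates the last three evaluations, so training would stop.
print(should_stop_early([0.60, 0.72, 0.70, 0.69, 0.71], patience=3))  # True
```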
for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} for target_name in target_names: print("Spearman {} target: {}".format(target_name,spearmans[target_name])) test_logs['Test Spearman '+target_name] = spearmans[target_name] if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: 
header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], 
all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + 
str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT": model = ProteinNPTModel(args, alphabet) elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]:
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} for target_name in target_names: print("Spearman {} target: {}".format(target_name,spearmans[target_name])) test_logs['Test Spearman '+target_name] = spearmans[target_name] if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def 
log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. 
target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. 
args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT": model = ProteinNPTModel(args, alphabet) elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]:
model = AugmentedPropertyPredictor(args, alphabet)
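The checkpoints written by the trainer above store `training_step`, `args`, `state_dict`, and `optimizer` in a single `checkpoint.t7` file. A short sketch of reloading such a file to resume training; the model, optimizer, and path are placeholders:

```python
import torch

def load_checkpoint(path, model, optimizer, device="cpu"):
    # Restore state saved as {'training_step', 'args', 'state_dict', 'optimizer'}.
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    return checkpoint["training_step"], checkpoint["args"]

# Usage (hypothetical path and objects):
# step, train_args = load_checkpoint("checkpoint-2000/checkpoint.t7", model, optimizer)
```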
1
2023-10-28 11:41:05+00:00
24k
CVHub520/yolov5_obb
detect.py
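The context snippets that follow for `detect.py` include `non_max_suppression_obb`, which treats the box angle as a 180-way classification and maps the winning one-degree bin to θ ∈ [-π/2, π/2). A minimal sketch of that decoding step (tensor shapes are illustrative):

```python
import math
import torch

def decode_theta(angle_logits: torch.Tensor) -> torch.Tensor:
    # angle_logits: (n, 180) scores over one-degree bins, taken from the channels
    # that follow the class/objectness entries in each prediction row.
    theta_bin = angle_logits.argmax(dim=1, keepdim=True).float()  # integer bin in [0, 179]
    return (theta_bin - 90.0) / 180.0 * math.pi                   # radians in [-pi/2, pi/2)

# Bin 0 maps to -pi/2, bin 90 maps to 0, bin 179 maps just below +pi/2.
print(decode_theta(torch.eye(180)[:1]).item())  # approximately -1.5708
```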
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = 
input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": "IMG_FORMATS", "path": "utils/datasets.py", "snippet": "IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/datasets.py", "snippet": "VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources) as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n print(f'Found {url} locally at {file}') # file already exists\n else:\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. 
runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using 
weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "non_max_suppression_obb", "path": "utils/general.py", "snippet": "def non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results_obb\n Args:\n prediction (tensor): (b, n_all_anchors, [cx cy l s obj num_cls theta_cls])\n agnostic (bool): True = NMS will be applied between elements of different categories\n labels : () or\n\n Returns:\n list of detections, len=batch_size, on (n,7) tensor per image [xylsθ, conf, cls] θ ∈ [-pi/2, pi/2)\n \"\"\"\n\n nc = prediction.shape[2] - 5 - 180 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n class_index = nc + 5\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n max_wh = 4096 # min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 30.0 # seconds to quit after\n # redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 7), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence, (tensor): (n_conf_thres, [cx cy l s obj num_cls theta_cls])\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:class_index] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n thete_index, theta_pred = torch.max(x[:, class_index:], 1, keepdim=True) # [n_conf_thres, 1] θ ∈ int[0, 179]\n theta_pred = (theta_pred - 90) / 180 * pi # [n_conf_thres, 1] θ ∈ [-pi/2, pi/2)\n\n # Detections matrix nx7 (xyls, θ, conf, cls) θ ∈ [-pi/2, pi/2)\n if multi_label:\n i, j = (x[:, 5:class_index] > conf_thres).nonzero(as_tuple=False).T # ()\n x = torch.cat((x[i, :4], theta_pred[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:class_index].max(1, keepdim=True)\n x = torch.cat((x[:, :4], theta_pred, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 6:7] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # 
excess boxes\n x = x[x[:, 5].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 6:7] * (0 if agnostic else max_wh) # classes\n rboxes = x[:, :5].clone() \n rboxes[:, :2] = rboxes[:, :2] + c # rboxes (offset by class)\n scores = x[:, 5] # scores\n _, i = obb_nms(rboxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "scale_polys", "path": "utils/general.py", "snippet": "def scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\n # ratio_pad: [(h_raw, w_raw), (hw_ratios, wh_paddings)]\n # Rescale coords (xyxyxyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = resized / raw\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0] # h_ratios\n pad = ratio_pad[1] # wh_paddings\n\n polys[:, [0, 2, 4, 6]] -= pad[0] # x padding\n polys[:, [1, 3, 5, 7]] -= pad[1] # y padding\n polys[:, :8] /= gain # Rescale poly shape to img0_shape\n #clip_polys(polys, img0_shape)\n return polys" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = 
int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 
+ vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" } ]
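The context list above closes with the poly2rbox and rbox2poly helpers from utils/rboxs_utils.py. As a quick, standalone illustration (not part of the repository), the numpy sketch below reproduces the rbox2poly corner math for a single rotated box; the function name rbox_to_corners and the sample values are made up for this example.

# Minimal sketch of the rbox2poly corner math for one box (illustrative only).
import numpy as np

def rbox_to_corners(cx, cy, l, s, theta):
    # theta in [-pi/2, pi/2); l is the long edge, s the short edge
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    v1 = np.array([l / 2 * cos_t, -l / 2 * sin_t])   # half long-edge vector
    v2 = np.array([-s / 2 * sin_t, -s / 2 * cos_t])  # half short-edge vector
    c = np.array([cx, cy])
    p1, p2, p3, p4 = c + v1 + v2, c + v1 - v2, c - v1 - v2, c - v1 + v2
    return np.concatenate([p1, p2, p3, p4])          # [x1 y1 x2 y2 x3 y3 x4 y4]

print(rbox_to_corners(100.0, 50.0, 40.0, 20.0, np.pi / 6))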
import argparse
import os
import sys
import cv2
import torch
import torch.backends.cudnn as cudnn
from pathlib import Path
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
from utils.rboxs_utils import poly2rbox, rbox2poly
15,636
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(FILE.stem, opt) return opt def main(opt):
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(FILE.stem, opt) return opt def main(opt):
check_requirements(exclude=('tensorboard', 'thop'))
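The run() loop shown in the code above maps the decoded polygons back to the raw image with scale_polys before drawing and saving them. The snippet below is a small, self-contained numpy sketch of that letterbox inverse mapping (the image sizes and the box are made-up values, not taken from this record): subtract the width/height padding, then divide by the resize gain.

# Illustrative sketch of the scale_polys letterbox inverse mapping (made-up numbers).
import numpy as np

resized_hw = (640, 640)   # network input size (h, w)
raw_hw = (480, 848)       # original image size (h, w)
gain = min(resized_hw[0] / raw_hw[0], resized_hw[1] / raw_hw[1])   # gain = resized / raw
pad = ((resized_hw[1] - raw_hw[1] * gain) / 2,                     # x (width) padding
       (resized_hw[0] - raw_hw[0] * gain) / 2)                     # y (height) padding

poly = np.array([[320.0, 200.0, 360.0, 200.0, 360.0, 240.0, 320.0, 240.0]])
poly[:, [0, 2, 4, 6]] -= pad[0]   # undo x padding
poly[:, [1, 3, 5, 7]] -= pad[1]   # undo y padding
poly[:, :8] /= gain               # undo resize
print(poly)                       # polygon in raw-image coordinates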
9
2023-10-31 06:06:41+00:00
24k
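One detail of the NMS helpers quoted in this record deserves a short illustration: both non_max_suppression and non_max_suppression_obb offset the box coordinates by class_index * max_wh (unless class-agnostic NMS is requested), so a single NMS call never suppresses boxes across different classes. The sketch below is a hypothetical, self-contained example of that trick for the axis-aligned case, using torchvision.ops.nms as the quoted non_max_suppression does; the boxes, scores, and classes are made up.

# Hypothetical demo of the "offset by class" trick used in non_max_suppression.
import torch
import torchvision

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 52., 52.],
                      [11., 11., 51., 51.]])
scores = torch.tensor([0.90, 0.80, 0.75])
classes = torch.tensor([[0.], [0.], [1.]])   # the third box belongs to another class
max_wh = 4096
offset_boxes = boxes + classes * max_wh      # push each class into its own coordinate range
keep = torchvision.ops.nms(offset_boxes, scores, iou_threshold=0.45)
print(keep)   # tensor([0, 2]): box 1 is suppressed by box 0, box 2 survives (different class)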
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @abstractmethod\n def generate_random_key(self) -> int:\n pass\n\n @abstractmethod\n def encrypt(\n self, plaintext: int, random_key: Union[Optional[int], Optional[list]] = None\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def decrypt(self, ciphertext: Union[int, tuple, list]) -> int:\n pass\n\n @abstractmethod\n def add(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def multiply(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple]:\n pass\n\n @abstractmethod\n def xor(self, ciphertext1: list, ciphertext2: list) -> list:\n pass\n\n @abstractmethod\n def multiply_by_contant(self, ciphertext: Union[int, tuple, list], constant: int) -> int:\n pass\n\n @abstractmethod\n def reencrypt(self, ciphertext: Union[int, tuple, list]) -> Union[int, tuple, list]:\n pass" }, { "identifier": "Algorithm", "path": "lightphe/models/Algorithm.py", "snippet": "class Algorithm:\n RSA = \"RSA\"\n ElGamal = \"ElGamal\"\n ExponentialElGamal = \"Exponential-ElGamal\"\n EllipticCurveElGamal = \"EllipticCurve-ElGamal\"\n Paillier = \"Paillier\"\n DamgardJurik = \"Damgard-Jurik\"\n OkamotoUchiyama = \"Okamoto-Uchiyama\"\n Benaloh = \"Benaloh\"\n NaccacheStern = \"Naccache-Stern\"\n GoldwasserMicali = \"Goldwasser-Micali\"" }, { "identifier": "RSA", "path": "lightphe/cryptosystems/RSA.py", "snippet": "class RSA(Homomorphic):\n \"\"\"\n RSA algorithm is partially homomorphic with respect to the multiplication\n Ref: https://sefiks.com/2023/03/06/a-step-by-step-partially-homomorphic-encryption-example-with-rsa-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 1024, encrypt_with_public=True):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n encrypt_with_public (boolean): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.encrypt_with_public = encrypt_with_public\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of RSA cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n while True:\n try:\n # picking a prime modulus p and q\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n # select public exponent e\n while True:\n e = random.randint(1, phi - 1)\n if math.gcd(e, n) == 1:\n break\n\n d = pow(e, -1, phi)\n break\n except:\n pass\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"e\"] = e\n keys[\"private_key\"][\"d\"] = d\n return keys\n\n def generate_random_key(self) -> int:\n pass\n\n def encrypt(self, plaintext: int) -> int:\n \"\"\"\n Encrypt plain messages with RSA\n Args:\n plaintext (int): plain message\n Returns:\n ciphertext (int): ciphertext encrypted with RSA\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if plaintext > n:\n plaintext = plaintext % n\n logger.debug(\n f\"RSA can encrypt messages [1, {n}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n if self.encrypt_with_public is True:\n e = self.keys[\"public_key\"][\"e\"]\n c = pow(plaintext, e, n)\n else:\n d = self.keys[\"private_key\"][\"d\"]\n c = pow(plaintext, d, n)\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt ciphertexts with RSA\n Args:\n ciphertext (int): encrypted message\n decrypt_with_private (int): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if self.encrypt_with_public is True:\n d = self.keys[\"private_key\"][\"d\"]\n p = pow(ciphertext, d, n)\n else:\n e = self.keys[\"public_key\"][\"e\"]\n p = pow(ciphertext, e, n)\n\n return p\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic multiplication on encrypted data.\n Result of this must be equal to E(m1 * m2)\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the addition\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n raise ValueError(\"RSA is not supporting multiplying ciphertext by a known constant\")\n\n def reencrypt(self, ciphertext: int) -> int:\n raise ValueError(\"RSA does not support re-encryption\")" }, { "identifier": "ElGamal", "path": "lightphe/cryptosystems/ElGamal.py", "snippet": "class ElGamal(Homomorphic):\n \"\"\"\n ElGamal algorithm is either multiplicatively or additively homomorphic\n Ref: https://sefiks.com/2023/03/27/a-step-by-step-partially-homomorphic-encryption-example-with-elgamal-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, exponential=False, key_size: int = 1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n exponential (boolean): set this to True to make cryptosystem exponential ElGamal.\n Regular ElGamal is homomorphic with respect to the multiplication whereas\n exponential ElGamal is homomorphic with respect to the addition\n \"\"\"\n self.exponential = exponential\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"p\"]\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(100, 2 ** int(key_size / 2) - 1)\n\n # picking a generator g\n g = random.randint(2, int(math.sqrt(p)))\n\n # picking a private key x\n x = random.randint(1, p - 2)\n\n # public key\n y = pow(g, x, p)\n\n keys[\"public_key\"] = {\n \"p\": p,\n \"g\": g,\n \"y\": y,\n }\n\n keys[\"private_key\"] = {\"x\": x}\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n return random.randint(1, p - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. 
Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n y = self.keys[\"public_key\"][\"y\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = plaintext % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n c1 = pow(g, r, p)\n if self.exponential is False:\n c2 = (plaintext * pow(y, r, p)) % p\n else:\n c2 = (pow(g, plaintext, p) * pow(y, r, p)) % p\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n c1, c2 = ciphertext\n\n x = self.keys[\"private_key\"][\"x\"]\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n m_prime = (c2 * pow(c1, -1 * x, p)) % p\n\n if self.exponential is False:\n return m_prime\n\n if self.exponential is True:\n # m_prime = g^m . Find m for known m_prime and known g (DLP).\n m = 0\n while True:\n if pow(g, m, p) == m_prime:\n return m\n m += 1\n if m > p:\n raise ValueError(f\"Cannot restore the message in [0, {p}]\")\n\n return -1\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic multiplication on encrypted data\n Result of this must be equal to E(m1 * m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is True:\n raise ValueError(\"Exponential ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is False:\n raise ValueError(\"Regular ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise ValueError(\"ElGamal is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n if self.exponential is False:\n raise ValueError(\"ElGamal is not supporting multiplying ciphertext by a known constant\")\n p = self.keys[\"public_key\"][\"p\"]\n if constant > p:\n constant = constant % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext[0], constant, p), pow(ciphertext[1], constant, p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n \"\"\"\n Re-generate ciphertext with re-encryption. 
Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.exponential is True:\n # then this is additively homomorphic\n neutral_element = 0\n else:\n # then this is multiplicatively homomorphic\n neutral_element = 1\n\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n\n if self.exponential is True:\n reencrypted_value = self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n else:\n reencrypted_value = self.multiply(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n return reencrypted_value" }, { "identifier": "Paillier", "path": "lightphe/cryptosystems/Paillier.py", "snippet": "class Paillier(Homomorphic):\n \"\"\"\n Paillier algorithm is homomorphic with respect to the addition.\n Also, it supports power operation for ciphertext base and plaintext exponent\n Ref: https://sefiks.com/2023/04/03/a-step-by-step-partially-homomorphic-encryption-example-with-paillier-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = n * n\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n assert math.gcd(r, n) == 1\n return (pow(g, plaintext, n * n) * pow(r, n, n * n)) % (n * n)\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n mu = pow(phi, -1, n)\n\n return (self.lx(pow(ciphertext, phi, n * n)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 
+ m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % (n * n)\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * m2) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Paillier\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Paillier can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, n * n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "DamgardJurik", "path": "lightphe/cryptosystems/DamgardJurik.py", "snippet": "class DamgardJurik(Homomorphic):\n \"\"\"\n Damgard-Jurik algorithm is a generalization of Paillier.\n It is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-damgard-jurik-in-python/\n \"\"\"\n\n def __init__(self, s: int = 2, keys: Optional[dict] = None, key_size: int = 1024):\n \"\"\"\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). if s == 1 then this is Paillier\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size=key_size, s=s)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = pow(n, s + 1)\n\n def generate_keys(self, key_size: int, s: Optional[int] = None):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). 
if s == 1 then this is Paillier\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"s\"] = s\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n r = random_key or self.generate_random_key()\n modulo = pow(n, s + 1)\n\n # assert math.gcd(r, n) == 1\n c = (pow(g, plaintext, modulo) * pow(r, n, modulo)) % modulo\n # c = (pow(g, plaintext, modulo) * pow(r, pow(n, s), modulo)) % modulo\n if math.gcd(c, modulo) != 1:\n logger.info(f\"WARNING! gcd({c=}, {modulo=}) != 1\")\n return c\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n mu = pow(phi, -1, n)\n modulo = pow(n, s + 1)\n return (self.lx(pow(ciphertext, phi, modulo)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n modulo = pow(n, s + 1)\n return (ciphertext1 * ciphertext2) % modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext by a known plain constant\n Result of this must be equal to E(m1 * m2), where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Damgard-Jurik\n constant (int): a known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Damgard-Jurik\n \"\"\"\n n = 
self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Damgard-Jurik can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "OkamotoUchiyama", "path": "lightphe/cryptosystems/OkamotoUchiyama.py", "snippet": "class OkamotoUchiyama(Homomorphic):\n \"\"\"\n Okamoto-Uchiyama algorithm is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-okamoto-uchiyama-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"private_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of OkamotoUchiyama cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # modulo\n n = p * p * q\n\n # generator\n g = random.randint(2, n)\n\n if pow(g, p - 1, p * p) == 1:\n raise ValueError(\"Fermat's Little Theorem must be satisfied\")\n\n h = pow(g, n, n)\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"h\"] = h\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Okamoto-Uchiyama requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with OkamotoUchiyama\n Args:\n plaintext (int): message to encrypt\n random_key (int): OkamotoUchiyama requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n h = self.keys[\"public_key\"][\"h\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = 
plaintext % p\n logger.debug(\n f\"plaintext must be in scale [0, {p=}] but this is exceeded.\"\n \"New plaintext is {plaintext}\"\n )\n return (pow(g, plaintext, n) * pow(h, r, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Okamoto-Uchiyama\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n a = self.lx(pow(ciphertext, p - 1, p * p))\n b = self.lx(pow(g, p - 1, p * p))\n return (a * pow(b, -1, p)) % p\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with OkamotoUchiyama\n ciphertext2 (int): 2nd ciphertext created with OkamotoUchiyama\n Returns:\n ciphertext3 (int): 3rd ciphertext created with OkamotoUchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Okamoto-Uchiyama\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Okamoto-Uchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Okamoto-Uchiyama can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / p\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n if x % p != 1:\n raise ValueError(f\"Input passed to lx ({x}) must be identical to 1 in modulo {p}\")\n if math.gcd(x, p * p) != 1:\n raise ValueError(f\"gcd({x}, {p}^2) must be equal to 1\")\n y = (x - 1) // p\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "Benaloh", "path": "lightphe/cryptosystems/Benaloh.py", "snippet": "class Benaloh(Homomorphic):\n def __init__(self, keys: Optional[dict] = None, key_size: int = 50):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. 
default is less than other cryptosystems\n because decryption of Benaloh requires to solve DLP :/\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"r\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n x = 1\n while x == 1:\n # picking a prime p\n p = sympy.randprime(200, 2**key_size)\n\n # picking a prime q\n q = sympy.randprime(100, p)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n r = p - 1\n while gcd(q - 1, r) != 1:\n r = int(r / gcd(q - 1, r))\n\n if not (\n # r should divide p-1 without remainder\n (p - 1) % r == 0\n # r and (p - 1) / r must be coprimes\n and gcd(r, int((p - 1) / r)) == 1\n # r and q-1 must be coprimes\n and gcd(r, q - 1) == 1\n ):\n continue\n\n y = random.randint(2, n)\n if gcd(y, n) != 1:\n continue\n\n # to guarantee correct decryption\n prime_factors = sympy.factorint(r).keys()\n decryption_guaranteed = True\n for prime_factor in prime_factors:\n # none of r's prime factor should satisfy the condition\n if pow(y, int(phi / prime_factor), n) == 1:\n decryption_guaranteed = False\n\n if decryption_guaranteed is False:\n continue\n\n x = pow(y, int(phi / r), n)\n if x != 1:\n break\n\n keys[\"public_key\"][\"y\"] = y\n keys[\"public_key\"][\"r\"] = r\n keys[\"public_key\"][\"n\"] = n\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"x\"] = x\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Generate random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n u = random.randint(1, n)\n if gcd(u, n) == 1:\n break\n return u\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Benaloh\n Args:\n plaintext (int): message to encrypt\n random_key (int): Benaloh requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n y = self.keys[\"public_key\"][\"y\"]\n r = self.keys[\"public_key\"][\"r\"]\n n = self.keys[\"public_key\"][\"n\"]\n\n u = random_key or self.generate_random_key()\n\n if plaintext > r:\n plaintext = plaintext % r\n logger.debug(\n f\"Benaloh lets you to encrypt messages in [0, {r=}].\"\n f\"But your plaintext exceeds this limit.\"\n f\"New plaintext is {plaintext}\"\n )\n\n c = (pow(y, plaintext, n) * pow(u, r, n)) % n\n\n if gcd(c, n) != 1:\n logger.debug(\"ciphertext is not co-prime with n!\")\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt a given ciphertext with Benaloh\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n r = self.keys[\"public_key\"][\"r\"]\n phi = self.keys[\"private_key\"][\"phi\"]\n x = self.keys[\"private_key\"][\"x\"]\n\n a = pow(ciphertext, int(phi / r), n)\n\n md = 0\n while True:\n if pow(x, md, n) == a:\n break\n md = md + 1\n if md > r:\n raise ValueError(f\"Message cannot be restored in [{0}, {n}]\")\n return md\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic 
addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Benaloh\n ciphertext2 (int): 2nd ciphertext created with Benaloh\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Benaloh\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Benaloh\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Benaloh\n \"\"\"\n # raise ValueError(\"Benaloh is not supporting multiplying by a constant\")\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Benaloh can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "NaccacheStern", "path": "lightphe/cryptosystems/NaccacheStern.py", "snippet": "class NaccacheStern(Homomorphic):\n \"\"\"\n Naccache-Stern algorithm is homomorphic with respect to the addition.\n It is a generaliation of Benaloh cryptosystem\n Ref: https://sefiks.com/2023/10/26/a-step-by-step-partially-homomorphic-encryption-example-with-naccache-stern-in-python/\n Original paper: https://dl.acm.org/doi/pdf/10.1145/288090.288106\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=37, deterministic: bool = False):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. Less than many cryptosystems because\n decryption requires to solve DLP.\n deterministic (boolean): deterministic or probabilistic version of\n cryptosystem\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"sigma\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.deterministic = deterministic\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Naccache-Stern cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # pick a family of small primes. 
the largest one is 10-bits\n # TODO: do something generic instead of constant primes\n prime_set = [3, 5, 7, 11, 13, 17]\n k = len(prime_set)\n\n if all(sympy.isprime(prime) is True for prime in prime_set) is False:\n raise ValueError(\"All items of prime set must be prime!\")\n\n # divide the set in half and find products of primes\n u = 1\n v = 1\n\n for i, prime in enumerate(prime_set):\n if i < len(prime_set) / 2:\n u = u * prime\n else:\n v = v * prime\n\n # product of all primes\n sigma = u * v\n\n # pick large prime numbers\n while True:\n a = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n b = sympy.randprime(100, a)\n\n # calculate two primes from chosen ones\n p = (2 * a * u) + 1\n q = (2 * b * v) + 1\n\n # recommended n is 768 bits\n n = p * q\n phi = (p - 1) * (q - 1)\n\n if phi % sigma != 0:\n logger.debug(\"canceled because phi cannot be divisible by sigma\")\n continue\n\n if math.gcd(sigma, int(phi // sigma)) != 1:\n logger.debug(\"canceled because sigma and phi/sigma are not coprime\")\n continue\n\n p_conditions = []\n for i in range(0, int(k / 2)):\n pi = prime_set[i]\n if (\n (p - 1) % pi == 0\n and math.gcd(pi, int((p - 1) / pi)) == 1\n and math.gcd(pi, q - 1) == 1\n ):\n p_conditions.append(1)\n else:\n p_conditions.append(0)\n p_satisfied = True if len(p_conditions) == sum(p_conditions) else False\n if p_satisfied is False:\n logger.debug(\"canceled because p_conditions are not satisfied\")\n continue\n\n q_conditions = []\n for i in range(int(k / 2), k):\n pi = prime_set[i]\n if (\n (q - 1) % pi == 0\n and math.gcd(pi, int((q - 1) / pi)) == 1\n and math.gcd(pi, p - 1)\n ):\n q_conditions.append(1)\n else:\n q_conditions.append(0)\n\n q_satisfied = True if len(q_conditions) == sum(q_conditions) else False\n if q_satisfied is False:\n logger.debug(\"canceled because q_conditions are not satisfied\")\n continue\n\n # p and q must be primes\n if not (sympy.isprime(p) and sympy.isprime(q)):\n continue\n\n # choose a generator g\n g = random.randint(2, n)\n # it must be co-prime to n\n if math.gcd(g, n) != 1:\n logger.debug(\"canceled becuase g is not co-prime with ne\")\n continue\n # guarantee it is not pi-th power.\n for pi in prime_set:\n logger.debug(\"canceled because g is a pi-th power\")\n if pow(g, int(phi / pi), n) == 1:\n continue\n\n # the order of g modulo n must be phi/4\n if pow(g, int(phi / 4), n) != 1:\n continue\n\n # check decryption is guaranteed similar to benaloh\n # ps: this is not mentioned in the original paper\n is_decryption_guaranteed = True\n for pi in prime_set:\n prime_factors = sympy.factorint(pi).keys()\n for prime_factor in prime_factors:\n if pow(g, int(phi / prime_factor), n) == 1:\n is_decryption_guaranteed = False\n if is_decryption_guaranteed is True:\n break\n\n logger.debug(f\"n bits is {len(bin(n)[2:])}\")\n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n # sigma can optionally be secret in deterministic version\n keys[\"public_key\"][\"sigma\"] = sigma\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"prime_set\"] = prime_set\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Naccache-Stern requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given 
plaintext for optionally given random key with Naccache-Stern\n Args:\n plaintext (int): message to encrypt\n random_key (int): Naccache-Stern requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n sigma = self.keys[\"public_key\"][\"sigma\"]\n if plaintext > self.plaintext_modulo:\n plaintext = plaintext % self.plaintext_modulo\n logger.debug(\n f\"plaintext must be in scale [0, {self.plaintext_modulo}] \"\n \"but this is exceeded. New plaintext is {plaintext}\"\n )\n\n if self.deterministic is True:\n return pow(g, plaintext, n)\n\n # Probabilistic\n return (pow(r, sigma, n) * pow(g, plaintext, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Naccache-Stern\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n g = self.keys[\"public_key\"][\"g\"]\n prime_set = self.keys[\"private_key\"][\"prime_set\"]\n\n remainders = []\n for i, prime in enumerate(prime_set):\n ci = pow(ciphertext, int(phi / prime), n)\n logger.debug(f\"c_{i} = {ci}\")\n\n j = 0\n while True:\n if ci == pow(g, int((j * phi) / prime), n):\n logger.debug(f\"m_{i} = {j}\")\n remainders.append(j)\n break\n j = j + 1\n if j > prime**2:\n raise ValueError(\n f\"c_{i} cannot be restored from {ci} = {g}^(j*{phi}/{prime}) mod {n}\"\n )\n\n congruences = []\n for i in range(0, len(prime_set)):\n logger.debug(f\"m mod {prime_set[i]} = {remainders[i]}\")\n congruences.append((remainders[i], prime_set[i]))\n\n # chinese remainder problem\n ms = solve_congruence(*congruences)\n if not ms:\n raise ValueError(\"message cannot be restored with Chinese Remainder!\")\n return ms[0]\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Naccache-Stern\n ciphertext2 (int): 2nd ciphertext created with Naccache-Stern\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Naccache-Stern\n \"\"\"\n return (ciphertext1 * ciphertext2) % self.ciphertext_modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Naccache-Stern\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Naccache-Stern\n \"\"\"\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Naccache-Stern can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. 
New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.deterministic is True:\n raise ValueError(\n \"Deterministic version of Naccache-Stern does not support reencryption.\"\n \"If you still want to perform ciphertext regeneration, then you may \"\n \"consider to use its probabilistic version.\"\n )\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "GoldwasserMicali", "path": "lightphe/cryptosystems/GoldwasserMicali.py", "snippet": "class GoldwasserMicali(Homomorphic):\n \"\"\"\n Goldwasser-Micali algorithm is homomorphic with respect to the Exclusively OR (XOR).\n Ref: https://sefiks.com/2023/10/27/a-step-by-step-partially-homomorphic-encryption-example-with-goldwasser-micali-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=100):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n # TODO: not sure about the plaintext modulo\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Goldwasser-Micali cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n\n # find non-residue x\n while True:\n x = random.randint(1, n - 1)\n if math.gcd(x, n) == 1 and jacobi_symbol(x, p) == -1 and jacobi_symbol(x, q) == -1:\n break\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"x\"] = x\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Goldwasser-Micali requires to generate one-time random key that co-prime to n\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(1, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> list:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Goldwasser-Micali\n Args:\n plaintext (int): message to encrypt\n random_key (int): Goldwasser-Micali requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n x = self.keys[\"public_key\"][\"x\"]\n\n m_binary = bin(plaintext)[2:]\n\n # number of bits\n k = len(m_binary)\n\n if random_key and len(random_key) != k:\n raise ValueError(f\"Random key must be length of {k}\")\n\n c = []\n for i in range(0, k):\n mi = int(m_binary[i])\n\n if random_key:\n ri = random_key[i]\n else:\n ri = 
self.generate_random_key()\n\n ci = (pow(ri, 2, n) * pow(x, mi, n)) % n\n c.append(ci)\n\n return c\n\n def decrypt(self, ciphertext: list) -> int:\n \"\"\"\n Decrypt a given ciphertext with Goldwasser-Micali\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n m_binaries = []\n\n p = self.keys[\"private_key\"][\"p\"]\n q = self.keys[\"private_key\"][\"q\"]\n\n for i in ciphertext:\n xp = i % p\n xq = i % q\n\n if pow(xp, int((p - 1) / 2), p) == 1 and pow(xq, int((q - 1) / 2), q) == 1:\n m_binaries.append(\"0\")\n else:\n m_binaries.append(\"1\")\n\n m_binary = \"\".join(m_binaries)\n return int(m_binary, 2)\n\n def add(self, ciphertext1: list, ciphertext2: list) -> list:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the addition\")\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> list:\n \"\"\"\n Perform homomorphic xor on encrypted data.\n Result of this must be equal to E(m1 ^ m2) = E(m1) ^ E(m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Goldwasser-Micali\n ciphertext2 (int): 2nd ciphertext created with Goldwasser-Micali\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Goldwasser-Micali\n \"\"\"\n ciphertext3 = []\n for i in range(0, len(ciphertext1)):\n c1 = ciphertext1[i]\n c2 = ciphertext2[i]\n ciphertext3.append((c1 * c2) % self.ciphertext_modulo)\n\n return ciphertext3\n\n def multiply_by_contant(self, ciphertext: int, constant: int):\n raise ValueError(\"Goldwasser-Micali does not support multiplying with constant\")\n\n def reencrypt(self, ciphertext: int):\n raise ValueError(\"Goldwasser-Micali does not support re-encryption\")" }, { "identifier": "EllipticCurveElGamal", "path": "lightphe/cryptosystems/EllipticCurveElGamal.py", "snippet": "class EllipticCurveElGamal(Homomorphic):\n \"\"\"\n Elliptic Curve ElGamal algorithm is an additively homomorphic algorithm\n Unluckily, it requires to solve (EC)DLP to restore plaintext in decryption\n However it is easy to restore plaintext while plaintext is not very large\n unsimilar to Benaloh or Naccache-Stern\n Ref: https://sefiks.com/2018/08/21/elliptic-curve-elgamal-encryption/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 160):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. default is 160.\n this is equivalent to 1024 bit RSA.\n \"\"\"\n # TODO: add different forms and curves. e.g. 
Koblitz, Edwards (Ed25519)\n self.curve = Weierstrass()\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.curve.p\n self.ciphertext_modulo = self.curve.p\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Elliptic Curve ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # private key\n ka = random.getrandbits(key_size)\n\n # public key\n Qa = self.curve.apply_double_and_add_method(G=self.curve.G, k=ka, p=self.curve.p)\n\n keys[\"public_key\"][\"Qa\"] = Qa\n keys[\"private_key\"][\"ka\"] = ka\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Elliptic Curve ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n return random.getrandbits(128)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with Elliptic Curve ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n # modulo\n p = self.curve.p\n\n # base point\n G = self.curve.G\n\n # public key\n Qa = self.keys[\"public_key\"][\"Qa\"]\n\n # random key\n r = random_key or self.generate_random_key()\n\n s = self.curve.apply_double_and_add_method(G=G, k=plaintext, p=p)\n\n c1 = self.curve.apply_double_and_add_method(G=G, k=r, p=p)\n\n c2 = self.curve.apply_double_and_add_method(G=Qa, k=r, p=p)\n c2 = self.curve.add_points(c2, s, p)\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with Elliptic Curve ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n # modulo\n p = self.curve.p\n\n # private key\n ka = self.keys[\"private_key\"][\"ka\"]\n\n c1, c2 = ciphertext\n c1_prime = (c1[0], (-1 * c1[1]) % p)\n s_prime = self.curve.apply_double_and_add_method(G=c1_prime, k=ka, p=p)\n s_prime = self.curve.add_points(P=c2, Q=s_prime, p=p)\n\n # s_prime is a point on the elliptic curve\n # s_prime = k x G\n # we need to find k from known s_prime and G\n # this requires to solve ECDLP\n\n # base point\n G = self.curve.G\n k = 2\n while True:\n G = self.curve.add_points(P=G, Q=self.curve.G, p=p)\n if G[0] == s_prime[0] and G[1] == s_prime[1]:\n return k\n k = k + 1\n if k > self.curve.n:\n raise ValueError(f\"Cannot restore scalar from {s_prime} = k x {self.curve.G}\")\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n raise ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the multiplication\"\n )\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n a = self.curve.add_points(P=ciphertext1[0], Q=ciphertext2[0], p=self.curve.p)\n b = self.curve.add_points(P=ciphertext1[1], Q=ciphertext2[1], p=self.curve.p)\n return a, b\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise 
ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the exclusive or\"\n )\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to k x E(m1) = E(m1 * k)\n where E(m1) = ciphertext\n Args:\n ciphertext (int): ciphertext created with Elliptic Curve ElGamal\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Elliptic Curve ElGamal\n \"\"\"\n return self.curve.apply_double_and_add_method(\n G=ciphertext[0], k=constant, p=self.curve.p\n ), self.curve.apply_double_and_add_method(G=ciphertext[1], k=constant, p=self.curve.p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n raise ValueError(\"Elliptic Curve ElGamal does not support regeneration of ciphertext\")" }, { "identifier": "phe_utils", "path": "lightphe/commons/phe_utils.py", "snippet": "def parse_int(value: Union[int, float], modulo: int) -> int:\ndef fractionize(value: float, modulo: int, precision: Optional[int] = None) -> Tuple[int, int]:\ndef solve_dlp():" }, { "identifier": "Logger", "path": "lightphe/commons/logger.py", "snippet": "class Logger:\n def __init__(self, module):\n self.module = module\n log_level = os.environ.get(\"LIGHTPHE_LOG_LEVEL\", str(logging.INFO))\n try:\n self.log_level = int(log_level)\n except Exception as err:\n self.dump_log(\n f\"Exception while parsing $LIGHTPHE_LOG_LEVEL.\"\n f\"Expected int but it is {log_level} ({str(err)})\"\n )\n self.log_level = logging.INFO\n\n def info(self, message):\n if self.log_level <= logging.INFO:\n self.dump_log(message)\n\n def debug(self, message):\n if self.log_level <= logging.DEBUG:\n self.dump_log(f\"🕷️ {message}\")\n\n def warn(self, message):\n if self.log_level <= logging.WARNING:\n self.dump_log(f\"⚠️ {message}\")\n\n def error(self, message):\n if self.log_level <= logging.ERROR:\n self.dump_log(f\"🔴 {message}\")\n\n def critical(self, message):\n if self.log_level <= logging.CRITICAL:\n self.dump_log(f\"💥 {message}\")\n\n def dump_log(self, message):\n print(f\"{str(datetime.now())[2:-7]} - {message}\")" } ]
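The context snippets above all implement the same Homomorphic interface: a constructor taking an optional keys dict plus a key_size, and encrypt/decrypt/add/xor methods. The following is a minimal usage sketch assuming only the constructors and method signatures visible in those snippets; the key sizes are the snippet defaults and are far too small for real-world security.

from lightphe.cryptosystems.Benaloh import Benaloh
from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali

# Additive homomorphism (Benaloh; OkamotoUchiyama and NaccacheStern expose the same add() contract):
# decrypt(E(m1) * E(m2) mod n) == (m1 + m2) mod plaintext_modulo
benaloh = Benaloh(key_size=50)   # default key size from the snippet; demo only
m1, m2 = 17, 25
c_sum = benaloh.add(benaloh.encrypt(plaintext=m1), benaloh.encrypt(plaintext=m2))
assert benaloh.decrypt(c_sum) == (m1 + m2) % benaloh.plaintext_modulo

# XOR homomorphism (Goldwasser-Micali encrypts bit-wise, so pick operands of equal bit length):
gm = GoldwasserMicali(key_size=100)
a, b = 0b1001, 0b1100
c_xor = gm.xor(gm.encrypt(plaintext=a), gm.encrypt(plaintext=b))
assert gm.decrypt(c_xor) == a ^ b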
from typing import Union from lightphe.models.Homomorphic import Homomorphic from lightphe.models.Algorithm import Algorithm from lightphe.cryptosystems.RSA import RSA from lightphe.cryptosystems.ElGamal import ElGamal from lightphe.cryptosystems.Paillier import Paillier from lightphe.cryptosystems.DamgardJurik import DamgardJurik from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama from lightphe.cryptosystems.Benaloh import Benaloh from lightphe.cryptosystems.NaccacheStern import NaccacheStern from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal from lightphe.commons import phe_utils from lightphe.commons.logger import Logger
17,410
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value
if algorithm_name == Algorithm.RSA:
1
2023-10-28 14:57:59+00:00
24k
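For orientation, the row above pairs a cropped_code prefix with a single gold next_line (here "if algorithm_name == Algorithm.RSA:"), while gold_snippet_index points into the context list. Below is a minimal sketch of how such a row could be consumed for next-line completion scoring; the field names match this dump, but the prompt construction and the exact-match metric are assumptions for illustration, not the dataset authors' official recipe.

def build_prompt(row: dict) -> str:
    # Concatenate the fields a completion model would see, in the column order shown above.
    return row["import_statement"] + "\n\n" + row["cropped_code"]

def next_line_matches(prediction: str, row: dict) -> bool:
    # Strict exact match after whitespace stripping; an assumed metric, not documented by the dataset.
    return prediction.strip() == row["next_line"].strip()

row = {
    "import_statement": "from lightphe.models.Algorithm import Algorithm",
    "cropped_code": "class Ciphertext:\n    def __init__(self, algorithm_name, keys, value):\n        ...",
    "next_line": "if algorithm_name == Algorithm.RSA:",
}
prompt = build_prompt(row)
print(next_line_matches("if algorithm_name == Algorithm.RSA:", row))  # True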
chenran-li/RQL-release
sb3_contrib/ars/ars.py
[ { "identifier": "BaseAlgorithm", "path": "stable_baselines3/common/base_class.py", "snippet": "class BaseAlgorithm(ABC):\n \"\"\"\n The base of RL algorithms\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param learning_rate: learning rate for the optimizer,\n it can be a function of the current progress remaining (from 1 to 0)\n :param policy_kwargs: Additional arguments to be passed to the policy on creation\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param device: Device on which the code should run.\n By default, it will try to use a Cuda compatible device and fallback to cpu\n if it is not possible.\n :param support_multi_env: Whether the algorithm supports training\n with multiple environments (as in A2C)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param seed: Seed for the pseudo random generators\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n # Policy aliases (see _get_policy_from_name())\n policy_aliases: Dict[str, Type[BasePolicy]] = {}\n\n def __init__(\n self,\n policy: Union[str, Type[BasePolicy]],\n env: Union[GymEnv, str, None],\n learning_rate: Union[float, Schedule],\n policy_kwargs: Optional[Dict[str, Any]] = None,\n tensorboard_log: Optional[str] = None,\n verbose: int = 0,\n device: Union[th.device, str] = \"auto\",\n support_multi_env: bool = False,\n monitor_wrapper: bool = True,\n seed: Optional[int] = None,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n if isinstance(policy, str):\n self.policy_class = self._get_policy_from_name(policy)\n else:\n self.policy_class = policy\n\n self.device = get_device(device)\n if verbose >= 1:\n print(f\"Using {self.device} device\")\n\n self.env = None # type: Optional[GymEnv]\n # get VecNormalize object if needed\n self._vec_normalize_env = unwrap_vec_normalize(env)\n self.verbose = verbose\n self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs\n self.observation_space = None # type: Optional[spaces.Space]\n self.action_space = None # type: Optional[spaces.Space]\n self.n_envs = None\n self.num_timesteps = 0\n # Used for updating schedules\n self._total_timesteps = 0\n # Used for computing fps, it is updated at each call of learn()\n self._num_timesteps_at_start = 0\n self.seed = seed\n self.action_noise: Optional[ActionNoise] = None\n self.start_time = None\n self.policy = None\n self.learning_rate = learning_rate\n self.tensorboard_log = tensorboard_log\n self.lr_schedule = None # type: Optional[Schedule]\n self._last_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]]\n self._last_episode_starts = None # type: Optional[np.ndarray]\n # When using VecNormalize:\n self._last_original_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]]\n self._episode_num = 0\n # Used for gSDE only\n self.use_sde = 
use_sde\n self.sde_sample_freq = sde_sample_freq\n # Track the training progress remaining (from 1 to 0)\n # this is used to update the learning rate\n self._current_progress_remaining = 1\n # Buffers for logging\n self.ep_info_buffer = None # type: Optional[deque]\n self.ep_success_buffer = None # type: Optional[deque]\n # For logging (and TD3 delayed updates)\n self._n_updates = 0 # type: int\n # The logger object\n self._logger = None # type: Logger\n # Whether the user passed a custom logger or not\n self._custom_logger = False\n\n # Create and wrap the env if needed\n if env is not None:\n env = maybe_make_env(env, self.verbose)\n env = self._wrap_env(env, self.verbose, monitor_wrapper)\n\n self.observation_space = env.observation_space\n self.action_space = env.action_space\n self.n_envs = env.num_envs\n self.env = env\n\n if supported_action_spaces is not None:\n assert isinstance(self.action_space, supported_action_spaces), (\n f\"The algorithm only supports {supported_action_spaces} as action spaces \"\n f\"but {self.action_space} was provided\"\n )\n\n if not support_multi_env and self.n_envs > 1:\n raise ValueError(\n \"Error: the model does not support multiple envs; it requires \" \"a single vectorized environment.\"\n )\n\n # Catch common mistake: using MlpPolicy/CnnPolicy instead of MultiInputPolicy\n if policy in [\"MlpPolicy\", \"CnnPolicy\"] and isinstance(self.observation_space, spaces.Dict):\n raise ValueError(f\"You must use `MultiInputPolicy` when working with dict observation space, not {policy}\")\n\n if self.use_sde and not isinstance(self.action_space, spaces.Box):\n raise ValueError(\"generalized State-Dependent Exploration (gSDE) can only be used with continuous actions.\")\n\n if isinstance(self.action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([self.action_space.low, self.action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n @staticmethod\n def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> VecEnv:\n \"\"\" \"\n Wrap environment with the appropriate wrappers if needed.\n For instance, to have a vectorized environment\n or to re-order the image channels.\n\n :param env:\n :param verbose: Verbosity level: 0 for no output, 1 for indicating wrappers used\n :param monitor_wrapper: Whether to wrap the env in a ``Monitor`` when possible.\n :return: The wrapped environment.\n \"\"\"\n if not isinstance(env, VecEnv):\n if not is_wrapped(env, Monitor) and monitor_wrapper:\n if verbose >= 1:\n print(\"Wrapping the env with a `Monitor` wrapper\")\n env = Monitor(env)\n if verbose >= 1:\n print(\"Wrapping the env in a DummyVecEnv.\")\n env = DummyVecEnv([lambda: env])\n\n # Make sure that dict-spaces are not nested (not supported)\n check_for_nested_spaces(env.observation_space)\n\n if not is_vecenv_wrapped(env, VecTransposeImage):\n wrap_with_vectranspose = False\n if isinstance(env.observation_space, spaces.Dict):\n # If even one of the keys is a image-space in need of transpose, apply transpose\n # If the image spaces are not consistent (for instance one is channel first,\n # the other channel last), VecTransposeImage will throw an error\n for space in env.observation_space.spaces.values():\n wrap_with_vectranspose = wrap_with_vectranspose or (\n is_image_space(space) and not is_image_space_channels_first(space)\n )\n else:\n wrap_with_vectranspose = is_image_space(env.observation_space) and not is_image_space_channels_first(\n env.observation_space\n )\n\n if 
wrap_with_vectranspose:\n if verbose >= 1:\n print(\"Wrapping the env in a VecTransposeImage.\")\n env = VecTransposeImage(env)\n\n return env\n\n @abstractmethod\n def _setup_model(self) -> None:\n \"\"\"Create networks, buffer and optimizers.\"\"\"\n\n def set_logger(self, logger: Logger) -> None:\n \"\"\"\n Setter for for logger object.\n\n .. warning::\n\n When passing a custom logger object,\n this will overwrite ``tensorboard_log`` and ``verbose`` settings\n passed to the constructor.\n \"\"\"\n self._logger = logger\n # User defined logger\n self._custom_logger = True\n\n @property\n def logger(self) -> Logger:\n \"\"\"Getter for the logger object.\"\"\"\n return self._logger\n\n def _setup_lr_schedule(self) -> None:\n \"\"\"Transform to callable if needed.\"\"\"\n self.lr_schedule = get_schedule_fn(self.learning_rate)\n\n def _update_current_progress_remaining(self, num_timesteps: int, total_timesteps: int) -> None:\n \"\"\"\n Compute current progress remaining (starts from 1 and ends to 0)\n\n :param num_timesteps: current number of timesteps\n :param total_timesteps:\n \"\"\"\n self._current_progress_remaining = 1.0 - float(num_timesteps) / float(total_timesteps)\n\n def _update_learning_rate(self, optimizers: Union[List[th.optim.Optimizer], th.optim.Optimizer]) -> None:\n \"\"\"\n Update the optimizers learning rate using the current learning rate schedule\n and the current progress remaining (from 1 to 0).\n\n :param optimizers:\n An optimizer or a list of optimizers.\n \"\"\"\n # Log the current learning rate\n self.logger.record(\"train/learning_rate\", self.lr_schedule(self._current_progress_remaining))\n\n if not isinstance(optimizers, list):\n optimizers = [optimizers]\n for optimizer in optimizers:\n update_learning_rate(optimizer, self.lr_schedule(self._current_progress_remaining))\n\n def _excluded_save_params(self) -> List[str]:\n \"\"\"\n Returns the names of the parameters that should be excluded from being\n saved by pickling. E.g. replay buffers are skipped by default\n as they take up a lot of space. PyTorch variables should be excluded\n with this so they can be stored with ``th.save``.\n\n :return: List of parameters that should be excluded from being saved with pickle.\n \"\"\"\n return [\n \"policy\",\n \"device\",\n \"env\",\n \"replay_buffer\",\n \"rollout_buffer\",\n \"_vec_normalize_env\",\n \"_episode_storage\",\n \"_logger\",\n \"_custom_logger\",\n ]\n\n def _get_policy_from_name(self, policy_name: str) -> Type[BasePolicy]:\n \"\"\"\n Get a policy class from its name representation.\n\n The goal here is to standardize policy naming, e.g.\n all algorithms can call upon \"MlpPolicy\" or \"CnnPolicy\",\n and they receive respective policies that work for them.\n\n :param policy_name: Alias of the policy\n :return: A policy class (type)\n \"\"\"\n\n if policy_name in self.policy_aliases:\n return self.policy_aliases[policy_name]\n else:\n raise ValueError(f\"Policy {policy_name} unknown\")\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n \"\"\"\n Get the name of the torch variables that will be saved with\n PyTorch ``th.save``, ``th.load`` and ``state_dicts`` instead of the default\n pickling strategy. This is to handle device placement correctly.\n\n Names can point to specific variables under classes, e.g.\n \"policy.optimizer\" would point to ``optimizer`` object of ``self.policy``\n if this object.\n\n :return:\n List of Torch variables whose state dicts to save (e.g. 
th.nn.Modules),\n and list of other Torch variables to store with ``th.save``.\n \"\"\"\n state_dicts = [\"policy\"]\n\n return state_dicts, []\n\n def _init_callback(\n self,\n callback: MaybeCallback,\n progress_bar: bool = False,\n ) -> BaseCallback:\n \"\"\"\n :param callback: Callback(s) called at every step with state of the algorithm.\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: A hybrid callback calling `callback` and performing evaluation.\n \"\"\"\n # Convert a list of callbacks into a callback\n if isinstance(callback, list):\n callback = CallbackList(callback)\n\n # Convert functional callback to object\n if not isinstance(callback, BaseCallback):\n callback = ConvertCallback(callback)\n\n # Add progress bar callback\n if progress_bar:\n callback = CallbackList([callback, ProgressBarCallback()])\n\n callback.init_callback(self)\n return callback\n\n def _setup_learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n reset_num_timesteps: bool = True,\n tb_log_name: str = \"run\",\n progress_bar: bool = False,\n ) -> Tuple[int, BaseCallback]:\n \"\"\"\n Initialize different variables needed for training.\n\n :param total_timesteps: The total number of samples (env steps) to train on\n :param callback: Callback(s) called at every step with state of the algorithm.\n :param reset_num_timesteps: Whether to reset or not the ``num_timesteps`` attribute\n :param tb_log_name: the name of the run for tensorboard log\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: Total timesteps and callback(s)\n \"\"\"\n self.start_time = time.time_ns()\n\n if self.ep_info_buffer is None or reset_num_timesteps:\n # Initialize buffers if they don't exist, or reinitialize if resetting counters\n self.ep_info_buffer = deque(maxlen=100)\n self.ep_success_buffer = deque(maxlen=100)\n\n if self.action_noise is not None:\n self.action_noise.reset()\n\n if reset_num_timesteps:\n self.num_timesteps = 0\n self._episode_num = 0\n else:\n # Make sure training timesteps are ahead of the internal counter\n total_timesteps += self.num_timesteps\n self._total_timesteps = total_timesteps\n self._num_timesteps_at_start = self.num_timesteps\n\n # Avoid resetting the environment when calling ``.learn()`` consecutive times\n if reset_num_timesteps or self._last_obs is None:\n self._last_obs = self.env.reset() # pytype: disable=annotation-type-mismatch\n self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool)\n # Retrieve unnormalized observation for saving into the buffer\n if self._vec_normalize_env is not None:\n self._last_original_obs = self._vec_normalize_env.get_original_obs()\n\n # Configure logger's outputs if no logger was passed\n if not self._custom_logger:\n self._logger = utils.configure_logger(self.verbose, self.tensorboard_log, tb_log_name, reset_num_timesteps)\n\n # Create eval callback if needed\n callback = self._init_callback(callback, progress_bar)\n\n return total_timesteps, callback\n\n def _update_info_buffer(self, infos: List[Dict[str, Any]], dones: Optional[np.ndarray] = None) -> None:\n \"\"\"\n Retrieve reward, episode length, episode success and update the buffer\n if using Monitor wrapper or a GoalEnv.\n\n :param infos: List of additional information about the transition.\n :param dones: Termination signals\n \"\"\"\n if dones is None:\n dones = np.array([False] * len(infos))\n for idx, info in enumerate(infos):\n maybe_ep_info = info.get(\"episode\")\n maybe_is_success = info.get(\"is_success\")\n if 
maybe_ep_info is not None:\n self.ep_info_buffer.extend([maybe_ep_info])\n if maybe_is_success is not None and dones[idx]:\n self.ep_success_buffer.append(maybe_is_success)\n\n def get_env(self) -> Optional[VecEnv]:\n \"\"\"\n Returns the current environment (can be None if not defined).\n\n :return: The current environment\n \"\"\"\n return self.env\n\n def get_vec_normalize_env(self) -> Optional[VecNormalize]:\n \"\"\"\n Return the ``VecNormalize`` wrapper of the training env\n if it exists.\n\n :return: The ``VecNormalize`` env.\n \"\"\"\n return self._vec_normalize_env\n\n def set_env(self, env: GymEnv, force_reset: bool = True) -> None:\n \"\"\"\n Checks the validity of the environment, and if it is coherent, set it as the current environment.\n Furthermore wrap any non vectorized env into a vectorized\n checked parameters:\n - observation_space\n - action_space\n\n :param env: The environment for learning a policy\n :param force_reset: Force call to ``reset()`` before training\n to avoid unexpected behavior.\n See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n \"\"\"\n # if it is not a VecEnv, make it a VecEnv\n # and do other transformations (dict obs, image transpose) if needed\n env = self._wrap_env(env, self.verbose)\n assert env.num_envs == self.n_envs, (\n \"The number of environments to be set is different from the number of environments in the model: \"\n f\"({env.num_envs} != {self.n_envs}), whereas `set_env` requires them to be the same. To load a model with \"\n f\"a different number of environments, you must use `{self.__class__.__name__}.load(path, env)` instead\"\n )\n # Check that the observation spaces match\n check_for_correct_spaces(env, self.observation_space, self.action_space)\n # Update VecNormalize object\n # otherwise the wrong env may be used, see https://github.com/DLR-RM/stable-baselines3/issues/637\n self._vec_normalize_env = unwrap_vec_normalize(env)\n\n # Discard `_last_obs`, this will force the env to reset before training\n # See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n if force_reset:\n self._last_obs = None\n\n self.n_envs = env.num_envs\n self.env = env\n\n @abstractmethod\n def learn(\n self: SelfBaseAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 100,\n tb_log_name: str = \"run\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfBaseAlgorithm:\n \"\"\"\n Return a trained model.\n\n :param total_timesteps: The total number of samples (env steps) to train on\n :param callback: callback(s) called at every step with state of the algorithm.\n :param log_interval: The number of timesteps before logging.\n :param tb_log_name: the name of the run for TensorBoard logging\n :param reset_num_timesteps: whether or not to reset the current timestep number (used in logging)\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: the trained model\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. 
normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n return self.policy.predict(observation, state, episode_start, deterministic)\n\n def set_random_seed(self, seed: Optional[int] = None) -> None:\n \"\"\"\n Set the seed of the pseudo-random generators\n (python, numpy, pytorch, gym, action_space)\n\n :param seed:\n \"\"\"\n if seed is None:\n return\n set_random_seed(seed, using_cuda=self.device.type == th.device(\"cuda\").type)\n self.action_space.seed(seed)\n # self.env is always a VecEnv\n if self.env is not None:\n self.env.seed(seed)\n\n def set_parameters(\n self,\n load_path_or_dict: Union[str, Dict[str, Dict]],\n exact_match: bool = True,\n device: Union[th.device, str] = \"auto\",\n ) -> None:\n \"\"\"\n Load parameters from a given zip-file or a nested dictionary containing parameters for\n different modules (see ``get_parameters``).\n\n :param load_path_or_iter: Location of the saved data (path or file-like, see ``save``), or a nested\n dictionary containing nn.Module parameters used by the policy. The dictionary maps\n object names to a state-dictionary returned by ``torch.nn.Module.state_dict()``.\n :param exact_match: If True, the given parameters should include parameters for each\n module and each of their parameters, otherwise raises an Exception. If set to False, this\n can be used to update only specific parameters.\n :param device: Device on which the code should run.\n \"\"\"\n params = None\n if isinstance(load_path_or_dict, dict):\n params = load_path_or_dict\n else:\n _, params, _ = load_from_zip_file(load_path_or_dict, device=device)\n\n # Keep track which objects were updated.\n # `_get_torch_save_params` returns [params, other_pytorch_variables].\n # We are only interested in former here.\n objects_needing_update = set(self._get_torch_save_params()[0])\n updated_objects = set()\n\n for name in params:\n attr = None\n try:\n attr = recursive_getattr(self, name)\n except Exception as e:\n # What errors recursive_getattr could throw? KeyError, but\n # possible something else too (e.g. if key is an int?).\n # Catch anything for now.\n raise ValueError(f\"Key {name} is an invalid object name.\") from e\n\n if isinstance(attr, th.optim.Optimizer):\n # Optimizers do not support \"strict\" keyword...\n # Seems like they will just replace the whole\n # optimizer state with the given one.\n # On top of this, optimizer state-dict\n # seems to change (e.g. 
first ``optim.step()``),\n # which makes comparing state dictionary keys\n # invalid (there is also a nesting of dictionaries\n # with lists with dictionaries with ...), adding to the\n # mess.\n #\n # TL;DR: We might not be able to reliably say\n # if given state-dict is missing keys.\n #\n # Solution: Just load the state-dict as is, and trust\n # the user has provided a sensible state dictionary.\n attr.load_state_dict(params[name])\n else:\n # Assume attr is th.nn.Module\n attr.load_state_dict(params[name], strict=exact_match)\n updated_objects.add(name)\n\n if exact_match and updated_objects != objects_needing_update:\n raise ValueError(\n \"Names of parameters do not match agents' parameters: \"\n f\"expected {objects_needing_update}, got {updated_objects}\"\n )\n\n @classmethod # noqa: C901\n def load(\n cls: Type[SelfBaseAlgorithm],\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n env: Optional[GymEnv] = None,\n device: Union[th.device, str] = \"auto\",\n custom_objects: Optional[Dict[str, Any]] = None,\n print_system_info: bool = False,\n force_reset: bool = True,\n **kwargs,\n ) -> SelfBaseAlgorithm:\n \"\"\"\n Load the model from a zip-file.\n Warning: ``load`` re-creates the model from scratch, it does not update it in-place!\n For an in-place load use ``set_parameters`` instead.\n\n :param path: path to the file (or a file-like) where to\n load the agent from\n :param env: the new environment to run the loaded model on\n (can be None if you only need prediction from a trained model) has priority over any saved environment\n :param device: Device on which the code should run.\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. 
Useful when you have an object in\n file that can not be deserialized.\n :param print_system_info: Whether to print system info from the saved model\n and the current system info (useful to debug loading issues)\n :param force_reset: Force call to ``reset()`` before training\n to avoid unexpected behavior.\n See https://github.com/DLR-RM/stable-baselines3/issues/597\n :param kwargs: extra arguments to change the model when loading\n :return: new model instance with loaded parameters\n \"\"\"\n if print_system_info:\n print(\"== CURRENT SYSTEM INFO ==\")\n get_system_info()\n\n data, params, pytorch_variables = load_from_zip_file(\n path,\n device=device,\n custom_objects=custom_objects,\n print_system_info=print_system_info,\n )\n\n # Remove stored device information and replace with ours\n if \"policy_kwargs\" in data:\n if \"device\" in data[\"policy_kwargs\"]:\n del data[\"policy_kwargs\"][\"device\"]\n\n if \"policy_kwargs\" in kwargs and kwargs[\"policy_kwargs\"] != data[\"policy_kwargs\"]:\n raise ValueError(\n f\"The specified policy kwargs do not equal the stored policy kwargs.\"\n f\"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}\"\n )\n\n if \"observation_space\" not in data or \"action_space\" not in data:\n raise KeyError(\"The observation_space and action_space were not given, can't verify new environments\")\n\n if env is not None:\n # Wrap first if needed\n env = cls._wrap_env(env, data[\"verbose\"])\n # Check if given env is valid\n check_for_correct_spaces(env, data[\"observation_space\"], data[\"action_space\"])\n # Discard `_last_obs`, this will force the env to reset before training\n # See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n if force_reset and data is not None:\n data[\"_last_obs\"] = None\n # `n_envs` must be updated. See issue https://github.com/DLR-RM/stable-baselines3/issues/1018\n if data is not None:\n data[\"n_envs\"] = env.num_envs\n else:\n # Use stored env, if one exists. 
If not, continue as is (can be used for predict)\n if \"env\" in data:\n env = data[\"env\"]\n\n # noinspection PyArgumentList\n from stable_baselines3.dqn_soft_residual.policies import ResidualSoftCnnPolicy, ResidualSoftMlpPolicy, ResidualSoftMultiInputPolicy\n from stable_baselines3.sac_residual.policies import ResidualCnnPolicy, ResidualMlpPolicy, ResidualMultiInputPolicy\n ResidualPolicies = [ResidualSoftCnnPolicy, ResidualSoftMlpPolicy, ResidualSoftMultiInputPolicy, ResidualCnnPolicy, ResidualMlpPolicy, ResidualMultiInputPolicy]\n if (data[\"policy_class\"] in ResidualPolicies):\n model = cls( # pytype: disable=not-instantiable,wrong-keyword-args\n policy=data[\"policy_class\"],\n prior_model_path = data[\"prior_model_path\"],\n env=env,\n device=device,\n _init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args\n )\n else:\n model = cls( # pytype: disable=not-instantiable,wrong-keyword-args\n policy=data[\"policy_class\"],\n env=env,\n device=device,\n _init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args\n )\n\n # load parameters\n model.__dict__.update(data)\n model.__dict__.update(kwargs)\n model._setup_model()\n\n try:\n # put state_dicts back in place\n model.set_parameters(params, exact_match=True, device=device)\n except RuntimeError as e:\n # Patch to load Policy saved using SB3 < 1.7.0\n # the error is probably due to old policy being loaded\n # See https://github.com/DLR-RM/stable-baselines3/issues/1233\n if \"pi_features_extractor\" in str(e) and \"Missing key(s) in state_dict\" in str(e):\n model.set_parameters(params, exact_match=False, device=device)\n warnings.warn(\n \"You are probably loading a model saved with SB3 < 1.7.0, \"\n \"we deactivated exact_match so you can save the model \"\n \"again to avoid issues in the future \"\n \"(see https://github.com/DLR-RM/stable-baselines3/issues/1233 for more info). \"\n f\"Original error: {e} \\n\"\n \"Note: the model should still work fine, this only a warning.\"\n )\n else:\n raise e\n\n # put other pytorch variables back in place\n if pytorch_variables is not None:\n for name in pytorch_variables:\n # Skip if PyTorch variable was not defined (to ensure backward compatibility).\n # This happens when using SAC/TQC.\n # SAC has an entropy coefficient which can be fixed or optimized.\n # If it is optimized, an additional PyTorch variable `log_ent_coef` is defined,\n # otherwise it is initialized to `None`.\n if pytorch_variables[name] is None:\n continue\n # Set the data attribute directly to avoid issue when using optimizers\n # See https://github.com/DLR-RM/stable-baselines3/issues/391\n recursive_setattr(model, name + \".data\", pytorch_variables[name].data)\n\n # Sample gSDE exploration matrix, so it uses the right device\n # see issue #44\n if model.use_sde:\n model.policy.reset_noise() # pytype: disable=attribute-error\n return model\n\n def get_parameters(self) -> Dict[str, Dict]:\n \"\"\"\n Return the parameters of the agent. 
This includes parameters from different networks, e.g.\n critics (value functions) and policies (pi functions).\n\n :return: Mapping of from names of the objects to PyTorch state-dicts.\n \"\"\"\n state_dicts_names, _ = self._get_torch_save_params()\n params = {}\n for name in state_dicts_names:\n attr = recursive_getattr(self, name)\n # Retrieve state dict\n params[name] = attr.state_dict()\n return params\n\n def save(\n self,\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n exclude: Optional[Iterable[str]] = None,\n include: Optional[Iterable[str]] = None,\n ) -> None:\n \"\"\"\n Save all the attributes of the object and the model parameters in a zip-file.\n\n :param path: path to the file where the rl agent should be saved\n :param exclude: name of parameters that should be excluded in addition to the default ones\n :param include: name of parameters that might be excluded but should be included anyway\n \"\"\"\n # Copy parameter list so we don't mutate the original dict\n data = self.__dict__.copy()\n\n # Exclude is union of specified parameters (if any) and standard exclusions\n if exclude is None:\n exclude = []\n exclude = set(exclude).union(self._excluded_save_params())\n\n # Do not exclude params if they are specifically included\n if include is not None:\n exclude = exclude.difference(include)\n\n state_dicts_names, torch_variable_names = self._get_torch_save_params()\n all_pytorch_variables = state_dicts_names + torch_variable_names\n for torch_var in all_pytorch_variables:\n # We need to get only the name of the top most module as we'll remove that\n var_name = torch_var.split(\".\")[0]\n # Any params that are in the save vars must not be saved by data\n exclude.add(var_name)\n\n # Remove parameter entries of parameters which are to be excluded\n for param_name in exclude:\n data.pop(param_name, None)\n\n # Build dict of torch variables\n pytorch_variables = None\n if torch_variable_names is not None:\n pytorch_variables = {}\n for name in torch_variable_names:\n attr = recursive_getattr(self, name)\n pytorch_variables[name] = attr\n\n # Build dict of state_dicts\n params_to_save = self.get_parameters()\n\n save_to_zip_file(path, data=data, params=params_to_save, pytorch_variables=pytorch_variables)" }, { "identifier": "BaseCallback", "path": "stable_baselines3/common/callbacks.py", "snippet": "class BaseCallback(ABC):\n \"\"\"\n Base class for callback.\n\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n \"\"\"\n\n def __init__(self, verbose: int = 0):\n super().__init__()\n # The RL model\n self.model = None # type: Optional[base_class.BaseAlgorithm]\n # An alias for self.model.get_env(), the environment used for training\n self.training_env = None # type: Union[gym.Env, VecEnv, None]\n # Number of time the callback was called\n self.n_calls = 0 # type: int\n # n_envs * n times env.step() was called\n self.num_timesteps = 0 # type: int\n self.verbose = verbose\n self.locals: Dict[str, Any] = {}\n self.globals: Dict[str, Any] = {}\n self.logger = None\n # Sometimes, for event callback, it is useful\n # to have access to the parent object\n self.parent = None # type: Optional[BaseCallback]\n\n # Type hint as string to avoid circular import\n def init_callback(self, model: \"base_class.BaseAlgorithm\") -> None:\n \"\"\"\n Initialize the callback by saving references to the\n RL model and the training environment for convenience.\n \"\"\"\n self.model = model\n self.training_env = model.get_env()\n self.logger = model.logger\n 
self._init_callback()\n\n def _init_callback(self) -> None:\n pass\n\n def on_training_start(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:\n # Those are reference and will be updated automatically\n self.locals = locals_\n self.globals = globals_\n # Update num_timesteps in case training was done before\n self.num_timesteps = self.model.num_timesteps\n self._on_training_start()\n\n def _on_training_start(self) -> None:\n pass\n\n def on_rollout_start(self) -> None:\n self._on_rollout_start()\n\n def _on_rollout_start(self) -> None:\n pass\n\n @abstractmethod\n def _on_step(self) -> bool:\n \"\"\"\n :return: If the callback returns False, training is aborted early.\n \"\"\"\n return True\n\n def on_step(self) -> bool:\n \"\"\"\n This method will be called by the model after each call to ``env.step()``.\n\n For child callback (of an ``EventCallback``), this will be called\n when the event is triggered.\n\n :return: If the callback returns False, training is aborted early.\n \"\"\"\n self.n_calls += 1\n self.num_timesteps = self.model.num_timesteps\n\n return self._on_step()\n\n def on_training_end(self) -> None:\n self._on_training_end()\n\n def _on_training_end(self) -> None:\n pass\n\n def on_rollout_end(self) -> None:\n self._on_rollout_end()\n\n def _on_rollout_end(self) -> None:\n pass\n\n def update_locals(self, locals_: Dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n self.locals.update(locals_)\n self.update_child_locals(locals_)\n\n def update_child_locals(self, locals_: Dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables on sub callbacks.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n pass" }, { "identifier": "evaluate_policy", "path": "stable_baselines3/common/evaluation.py", "snippet": "def evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: Union[gym.Env, VecEnv],\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,\n reward_threshold: Optional[float] = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:\n \"\"\"\n Runs policy for ``n_eval_episodes`` episodes and returns average reward.\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. 
This can be any object\n that implements a `predict` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episode to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to do additional checks,\n called after each step. Gets locals() and globals() passed as parameters.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean reward per episode, std of reward per episode.\n Returns ([float], [int]) when ``return_episode_rewards`` is True, first\n list containing per-episode rewards and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env])\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. \"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(observations, state=states, episode_start=episode_starts, deterministic=deterministic)\n observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. 
Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. 
normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "load_from_zip_file", "path": "stable_baselines3/common/save_util.py", "snippet": "def load_from_zip_file(\n load_path: Union[str, pathlib.Path, io.BufferedIOBase],\n load_data: bool = True,\n custom_objects: Optional[Dict[str, Any]] = None,\n device: Union[th.device, str] = \"auto\",\n verbose: int = 0,\n print_system_info: bool = False,\n) -> (Tuple[Optional[Dict[str, Any]], Optional[TensorDict], Optional[TensorDict]]):\n \"\"\"\n Load model data from a .zip archive\n\n :param load_path: Where to load the model from\n :param load_data: Whether we should load and return data\n (class parameters). Mainly used by 'load_parameters' to only load model parameters (weights)\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. 
Useful when you have an object in\n file that can not be deserialized.\n :param device: Device on which the code should run.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param print_system_info: Whether to print or not the system info\n about the saved model.\n :return: Class parameters, model state_dicts (aka \"params\", dict of state_dict)\n and dict of pytorch variables\n \"\"\"\n load_path = open_path(load_path, \"r\", verbose=verbose, suffix=\"zip\")\n\n # set device to cpu if cuda is not available\n device = get_device(device=device)\n\n # Open the zip archive and load data\n try:\n with zipfile.ZipFile(load_path) as archive:\n namelist = archive.namelist()\n # If data or parameters is not in the\n # zip archive, assume they were stored\n # as None (_save_to_file_zip allows this).\n data = None\n pytorch_variables = None\n params = {}\n\n # Debug system info first\n if print_system_info:\n if \"system_info.txt\" in namelist:\n print(\"== SAVED MODEL SYSTEM INFO ==\")\n print(archive.read(\"system_info.txt\").decode())\n else:\n warnings.warn(\n \"The model was saved with SB3 <= 1.2.0 and thus cannot print system information.\",\n UserWarning,\n )\n\n if \"data\" in namelist and load_data:\n # Load class parameters that are stored\n # with either JSON or pickle (not PyTorch variables).\n json_data = archive.read(\"data\").decode()\n data = json_to_data(json_data, custom_objects=custom_objects)\n\n # Check for all .pth files and load them using th.load.\n # \"pytorch_variables.pth\" stores PyTorch variables, and any other .pth\n # files store state_dicts of variables with custom names (e.g. policy, policy.optimizer)\n pth_files = [file_name for file_name in namelist if os.path.splitext(file_name)[1] == \".pth\"]\n for file_path in pth_files:\n with archive.open(file_path, mode=\"r\") as param_file:\n # File has to be seekable, but param_file is not, so load in BytesIO first\n # fixed in python >= 3.7\n file_content = io.BytesIO()\n file_content.write(param_file.read())\n # go to start of file\n file_content.seek(0)\n # Load the parameters with the right ``map_location``.\n # Remove \".pth\" ending with splitext\n th_object = th.load(file_content, map_location=device)\n # \"tensors.pth\" was renamed \"pytorch_variables.pth\" in v0.9.0, see PR #138\n if file_path == \"pytorch_variables.pth\" or file_path == \"tensors.pth\":\n # PyTorch variables (not state_dicts)\n pytorch_variables = th_object\n else:\n # State dicts. 
Store into params dictionary\n # with same name as in .zip file (without .pth)\n params[os.path.splitext(file_path)[0]] = th_object\n except zipfile.BadZipFile as e:\n # load_path wasn't a zip file\n raise ValueError(f\"Error: the file {load_path} wasn't a zip-file\") from e\n return data, params, pytorch_variables" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "get_schedule_fn", "path": "stable_baselines3/common/utils.py", "snippet": "def get_schedule_fn(value_schedule: Union[Schedule, float, int]) -> Schedule:\n \"\"\"\n Transform (if needed) learning rate and clip range (for PPO)\n to callable.\n\n :param value_schedule: Constant value of schedule function\n :return: Schedule function (can return constant value)\n \"\"\"\n # If the passed schedule is a float\n # create a constant function\n if isinstance(value_schedule, (float, int)):\n # Cast to float to avoid errors\n value_schedule = constant_fn(float(value_schedule))\n else:\n assert callable(value_schedule)\n return value_schedule" }, { "identifier": "safe_mean", "path": "stable_baselines3/common/utils.py", "snippet": "def safe_mean(arr: Union[np.ndarray, list, deque]) -> np.ndarray:\n \"\"\"\n Compute the mean of an array if there is at least one element.\n For empty array, return NaN. It is used for logging only.\n\n :param arr: Numpy array or list of values\n :return:\n \"\"\"\n return np.nan if len(arr) == 0 else np.mean(arr)" }, { "identifier": "ARSPolicy", "path": "sb3_contrib/ars/policies.py", "snippet": "class ARSPolicy(BasePolicy):\nclass ARSLinearPolicy(ARSPolicy):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n with_bias: bool = True,\n squash_output: bool = True,\n ):\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def forward(self, obs: th.Tensor) -> th.Tensor:\n def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n with_bias: bool = False,\n squash_output: bool = False,\n ):" }, { "identifier": "AsyncEval", "path": "sb3_contrib/common/vec_env/async_eval.py", "snippet": "class AsyncEval:\n \"\"\"\n Helper class to do asynchronous evaluation of different policies with multiple processes.\n It is useful when implementing population based methods like Evolution Strategies (ES),\n Cross Entropy Method (CEM) or Augmented Random Search (ARS).\n\n .. warning::\n\n Only 'forkserver' and 'spawn' start methods are thread-safe,\n which is important to avoid race conditions.\n However, compared to\n 'fork' they incur a small start-up cost and have restrictions on\n global variables. 
With those methods, users must wrap the code in an\n ``if __name__ == \"__main__\":`` block.\n For more information, see the multiprocessing documentation.\n\n :param envs_fn: Vectorized environments to run in subprocesses (callable)\n :param train_policy: The policy object that will load the different candidate\n weights.\n :param start_method: method used to start the subprocesses.\n Must be one of the methods returned by ``multiprocessing.get_all_start_methods()``.\n Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.\n :param n_eval_episodes: The number of episodes to test each agent\n \"\"\"\n\n def __init__(\n self,\n envs_fn: List[Callable[[], VecEnv]],\n train_policy: BasePolicy,\n start_method: Optional[str] = None,\n n_eval_episodes: int = 1,\n ):\n self.waiting = False\n self.closed = False\n n_envs = len(envs_fn)\n\n if start_method is None:\n # Fork is not a thread safe method (see issue #217)\n # but is more user friendly (does not require to wrap the code in\n # a `if __name__ == \"__main__\":`)\n forkserver_available = \"forkserver\" in mp.get_all_start_methods()\n start_method = \"forkserver\" if forkserver_available else \"spawn\"\n ctx = mp.get_context(start_method)\n\n self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])\n self.processes = []\n for work_remote, remote, worker_env in zip(self.work_remotes, self.remotes, envs_fn):\n args = (\n work_remote,\n remote,\n CloudpickleWrapper(worker_env),\n CloudpickleWrapper(train_policy),\n n_eval_episodes,\n )\n # daemon=True: if the main process crashes, we should not cause things to hang\n process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error\n process.start()\n self.processes.append(process)\n work_remote.close()\n\n def send_jobs(self, candidate_weights: th.Tensor, pop_size: int) -> None:\n \"\"\"\n Send jobs to the workers to evaluate new candidates.\n\n :param candidate_weights: The weights to be evaluated.\n :pop_size: The number of candidate (size of the population)\n \"\"\"\n jobs_per_worker = defaultdict(list)\n for weights_idx in range(pop_size):\n jobs_per_worker[weights_idx % len(self.remotes)].append((weights_idx, candidate_weights[weights_idx]))\n\n for remote_idx, remote in enumerate(self.remotes):\n remote.send((\"eval\", jobs_per_worker[remote_idx]))\n self.waiting = True\n\n def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:\n \"\"\"\n Seed the environments.\n\n :param seed: The seed for the pseudo-random generators.\n :return:\n \"\"\"\n for idx, remote in enumerate(self.remotes):\n remote.send((\"seed\", seed + idx))\n return [remote.recv() for remote in self.remotes]\n\n def get_results(self) -> List[Tuple[int, Tuple[np.ndarray, np.ndarray]]]:\n \"\"\"\n Retreive episode rewards and lengths from each worker\n for all candidates (there might be multiple candidates per worker)\n\n :return: A list of tuples containing each candidate index and its\n result (episodic reward and episode length)\n \"\"\"\n results = [remote.recv() for remote in self.remotes]\n flat_results = [result for worker_results in results for result in worker_results]\n self.waiting = False\n return flat_results\n\n def get_obs_rms(self) -> List[RunningMeanStd]:\n \"\"\"\n Retrieve the observation filters (observation running mean std)\n of each process, they will be combined in the main process.\n Synchronisation is done afterward using ``sync_obs_rms()``.\n :return: A list of ``RunningMeanStd`` objects (one per process)\n \"\"\"\n 
for remote in self.remotes:\n remote.send((\"get_obs_rms\", None))\n return [remote.recv() for remote in self.remotes]\n\n def sync_obs_rms(self, obs_rms: RunningMeanStd) -> None:\n \"\"\"\n Synchronise (and update) the observation filters\n (observation running mean std)\n :param obs_rms: The updated ``RunningMeanStd`` to be used\n by workers for normalizing observations.\n \"\"\"\n for remote in self.remotes:\n remote.send((\"sync_obs_rms\", obs_rms))\n\n def close(self) -> None:\n \"\"\"\n Close the processes.\n \"\"\"\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send((\"close\", None))\n for process in self.processes:\n process.join()\n self.closed = True" } ]
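The stable-baselines3 snippets above (the zip-based save helpers, ``BaseCallback``, ``evaluate_policy``, ``BasePolicy.predict``, ``load_from_zip_file`` and ``AsyncEval``) are the pieces of a standard train/evaluate/save workflow. The sketch below shows how they are typically combined; the environment id "CartPole-v1", the choice of PPO and every hyperparameter are illustrative assumptions, not values taken from this record.

import gym

from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

# Wrap with Monitor so evaluate_policy reports unmodified episode rewards/lengths
env = Monitor(gym.make("CartPole-v1"))

model = PPO("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=10_000)

# Mean and std of episodic reward over 10 deterministic evaluation episodes
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean reward: {mean_reward:.2f} +/- {std_reward:.2f}")

# save()/load() round-trip through the zip helpers shown in the snippets above
model.save("ppo_cartpole")
model = PPO.load("ppo_cartpole", env=env)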
import copy import sys import time import warnings import numpy as np import torch as th import torch.nn.utils from functools import partial from typing import Any, Dict, Optional, Type, TypeVar, Union from gym import spaces from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_zip_file from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_schedule_fn, safe_mean from sb3_contrib.ars.policies import ARSPolicy, LinearPolicy, MlpPolicy from sb3_contrib.common.vec_env.async_eval import AsyncEval
15394
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top delta to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have it's weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs: :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None,
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top delta to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have it's weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs: :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None,
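The ARS class above is driven through the usual BaseAlgorithm interface. A minimal training sketch follows; the ``LinearPolicy`` alias and the ``n_delta``, ``n_top``, ``learning_rate`` and ``delta_std`` arguments come from the constructor documented above, while "Pendulum-v1" and the numeric values are illustrative assumptions.

from sb3_contrib import ARS

model = ARS(
    "LinearPolicy",      # resolved via the policy_aliases mapping above
    "Pendulum-v1",       # assumed environment id
    n_delta=8,           # random perturbations of the policy tried per update
    n_top=4,             # keep only the best 4 perturbations for the update step
    learning_rate=0.02,
    delta_std=0.05,
    verbose=1,
)
model.learn(total_timesteps=50_000)
model.save("ars_pendulum")

When many candidate weight vectors have to be scored per update, the ``AsyncEval`` helper from the earlier context exists precisely to distribute that evaluation over parallel worker processes.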
learning_rate: Union[float, Schedule] = 0.02,
5
2023-10-28 01:09:21+00:00
24k
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _MLP(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'mlp'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n \"\"\"MLP config preprocessing\"\"\"\n # process mlp configs\n self.saved_model_config = model_config.copy()\n d_layers = []\n n_layers, first_dim, mid_dim, last_dim = \\\n (\n model_config.pop('n_layers'), model_config.pop('first_dim'),\n model_config.pop('mid_dim'), model_config.pop('last_dim')\n )\n for i in range(n_layers):\n if i == 0:\n d_layers.append(first_dim)\n elif i == n_layers - 1 and n_layers > 1:\n d_layers.append(last_dim)\n else:\n d_layers.append(mid_dim)\n model_config['d_layers'] = d_layers\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "FTTransformer", "path": "models/ft_transformer.py", "snippet": "class FTTransformer(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = rtdl.FTTransformer.make_baseline(\n n_num_features=n_num_features,\n cat_cardinalities=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'ft-transformer'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n self.saved_model_config = model_config.copy()\n # process ftt configs\n if 'ffn_d_factor' in model_config:\n model_config['ffn_d_hidden'] = \\\n int(model_config['d_token'] * model_config.pop('ffn_d_factor'))\n return model_config\n \n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "AutoInt", "path": "models/autoint.py", "snippet": "class AutoInt(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _AutoInt(\n d_numerical=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'autoint'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args,\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "DCNv2", "path": "models/dcnv2.py", "snippet": "class DCNv2(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _DCNv2(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'dcnv2'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "NODE", "path": "models/node_model.py", "snippet": "class NODE(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _NODE(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n tree_dim=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'node'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "TabModel", "path": "models/abstract.py", "snippet": "class TabModel(ABC):\n def __init__(self):\n self.model: Optional[nn.Module] = None # true model\n self.base_name = None # model type name\n self.device = None\n self.saved_model_config = None\n self.training_config = None\n self.meta_config = None\n self.post_init()\n\n def post_init(self):\n self.history = {\n 'train': {'loss': [], 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0}, \n 'val': {\n 'metric_name': None, 'metric': [], 'best_metric': None, \n 'log_loss': [], 'best_log_loss': None,\n 'best_epoch': None, 'best_step': None,\n 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0\n }, \n # 'test': {'loss': [], 'metric': [], 'final_metric': None},\n 'device': torch.cuda.get_device_name(),\n } # save metrics\n self.no_improvement = 0 # for dnn early stop\n \n def preproc_config(self, model_config: dict):\n \"\"\"default preprocessing for model configurations\"\"\"\n self.saved_model_config = model_config\n return model_config\n \n @abstractmethod\n def fit(\n self,\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n eval_set: Optional[Tuple[Union[torch.Tensor, np.ndarray]]],\n patience: int,\n task: str,\n training_args: dict,\n meta_args: Optional[dict],\n ):\n \"\"\"\n Training Model with Early Stop(optional)\n load best weights at the end\n \"\"\"\n pass\n \n def dnn_fit(\n self,\n *,\n dnn_fit_func: Optional[DNN_FIT_API] = None,\n # API for specical sampler like curriculum learning\n train_loader: Optional[Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal dataloader sampler if is None\n X_num: 
Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None,\n y_std: Optional[float] = None, # for RMSE\n eval_set: Tuple[torch.Tensor, np.ndarray] = None, # similar API as sk-learn\n patience: int = 0, # <= 0 without early stop\n task: str,\n training_args: dict,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_fit)\n if dnn_fit_func is None:\n dnn_fit_func = default_dnn_fit\n # meta args\n if meta_args is None:\n meta_args = {}\n meta_args.setdefault('save_path', f'results/{self.base_name}')\n if not os.path.exists(meta_args['save_path']):\n print('create new results dir: ', meta_args['save_path'])\n os.makedirs(meta_args['save_path'])\n self.meta_config = meta_args\n # optimzier and scheduler\n training_args.setdefault('optimizer', 'adamw')\n optimizer, scheduler = TabModel.make_optimizer(self.model, training_args)\n # data loader\n training_args.setdefault('batch_size', 64)\n training_args.setdefault('ghost_batch_size', None)\n if train_loader is not None:\n train_loader, missing_idx = train_loader\n training_args['batch_size'] = train_loader.batch_size\n else:\n train_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=training_args['batch_size'],\n shuffle=True,\n )\n if eval_set is not None:\n eval_set = eval_set[0] # only use the first dev set\n dev_loader = TabModel.prepare_tensor_loader(\n X_num=eval_set[0], X_cat=eval_set[1], ys=eval_set[2],\n batch_size=training_args['batch_size'],\n )\n else:\n dev_loader = None\n # training loops\n training_args.setdefault('max_epochs', 1000)\n # training_args.setdefault('report_frequency', 100) # same as save_freq\n # training_args.setdefault('save_frequency', 100) # save per 100 steps\n training_args.setdefault('patience', patience)\n training_args.setdefault('save_frequency', 'epoch') # save per epoch\n self.training_config = training_args\n\n steps_per_backward = 1 if training_args['ghost_batch_size'] is None \\\n else training_args['batch_size'] // training_args['ghost_batch_size']\n steps_per_epoch = len(train_loader)\n tot_step, tot_time = 0, 0\n for e in range(training_args['max_epochs']):\n self.model.train()\n tot_loss = 0\n for step, batch in enumerate(train_loader):\n optimizer.zero_grad()\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n logits, forward_time = dnn_fit_func(self.model, x_num, x_cat, y)\n loss = TabModel.compute_loss(logits, y, task)\n # backward\n start_time = time.time()\n loss.backward()\n backward_time = time.time() - start_time\n self.gradient_policy()\n tot_time += forward_time + backward_time\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n # print or save infos\n tot_step += 1\n tot_loss += loss.cpu().item()\n if isinstance(training_args['save_frequency'], int) \\\n and tot_step % training_args['save_frequency'] == 0:\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n if training_args['save_frequency'] == 'epoch':\n if hasattr(self.model, 'layer_masks'):\n print('layer_mask: ', self.model.layer_masks > 0)\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n 
self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n \n @abstractmethod\n def predict(\n self,\n dev_loader: Optional[DataLoader],\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n task: str,\n return_probs: bool = True,\n return_metric: bool = True,\n return_loss: bool = True,\n meta_args: Optional[dict] = None,\n ):\n \"\"\"\n Prediction\n \"\"\"\n pass\n \n def dnn_predict(\n self,\n *,\n dnn_predict_func: Optional[DNN_PREDICT_API] = None,\n dev_loader: Optional[Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None, \n y_std: Optional[float] = None, # for RMSE\n task: str,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_predict)\n if dnn_predict_func is None:\n dnn_predict_func = default_dnn_predict\n if dev_loader is None:\n dev_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=128,\n )\n else:\n dev_loader, missing_idx = dev_loader\n # print(\"Evaluate...\")\n predictions, golds = [], []\n tot_time = 0\n self.model.eval()\n for batch in dev_loader:\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n with torch.no_grad():\n logits, used_time = dnn_predict_func(self.model, x_num, x_cat)\n tot_time += used_time\n predictions.append(logits)\n golds.append(y)\n self.model.train()\n predictions = torch.cat(predictions).squeeze(-1)\n golds = torch.cat(golds)\n if return_loss:\n loss = TabModel.compute_loss(predictions, golds, task).cpu().item()\n else:\n loss = None\n if return_probs and task != 'regression':\n predictions = (\n predictions.sigmoid()\n if task == 'binclass'\n else predictions.softmax(-1)\n )\n prediction_type = 'probs'\n elif task == 'regression':\n prediction_type = None\n else:\n prediction_type = 'logits'\n predictions = predictions.cpu().numpy()\n golds = golds.cpu().numpy()\n if return_metric:\n metric = TabModel.calculate_metric(\n golds, predictions,\n task, prediction_type, y_std\n )\n logloss = (\n log_loss(golds, np.stack([1-predictions, predictions], axis=1), labels=[0,1])\n if task == 'binclass'\n else log_loss(golds, predictions, labels=list(range(len(set(golds)))))\n if task == 'multiclass'\n else None\n )\n else:\n metric, logloss = None, None\n results = {'loss': loss, 'metric': metric, 'time': tot_time, 'log_loss': logloss}\n if meta_args is not None:\n self.save_prediction(meta_args['save_path'], results)\n return predictions, results\n \n def gradient_policy(self):\n \"\"\"For post porcess model gradient\"\"\"\n pass\n \n @abstractmethod\n def save(self, output_dir):\n \"\"\"\n Save model weights and configs,\n the following default save functions\n can be combined to override this function\n \"\"\"\n pass\n\n def save_pt_model(self, output_dir):\n print('saving pt model weights...')\n # save model params\n torch.save(self.model.state_dict(), Path(output_dir) / 'final.bin')\n \n def save_tree_model(self, output_dir):\n print('saving tree model...')\n pass\n\n def save_history(self, output_dir):\n # save metrics\n with open(Path(output_dir) / 'results.json', 'w') as f:\n json.dump(self.history, f, indent=4)\n \n def 
save_prediction(self, output_dir, results, file='prediction'):\n check_dir(output_dir)\n # save test results\n print(\"saving prediction results\")\n saved_results = {\n 'loss': results['loss'], \n 'metric_name': results['metric'][1], \n 'metric': results['metric'][0], \n 'time': results['time'],\n 'log_loss': results['log_loss'],\n }\n with open(Path(output_dir) / f'{file}.json', 'w') as f:\n json.dump(saved_results, f, indent=4)\n \n def save_config(self, output_dir):\n def serialize(config: dict):\n for key in config:\n # serialized object to store yaml or json files \n if any(isinstance(config[key], obj) for obj in [Path, ]):\n config[key] = str(config[key])\n return config\n # save all configs\n with open(Path(output_dir) / 'configs.yaml', 'w') as f:\n configs = {\n 'model': self.saved_model_config, \n 'training': self.training_config,\n 'meta': serialize(self.meta_config)\n }\n yaml.dump(configs, f, indent=2)\n\n @staticmethod\n def make_optimizer(\n model: nn.Module,\n training_args: dict,\n ) -> Tuple[optim.Optimizer, optim.lr_scheduler._LRScheduler]:\n training_args.setdefault('optimizer', 'adamw')\n training_args.setdefault('no_wd_group', None)\n training_args.setdefault('scheduler', None)\n # optimizer\n if training_args['no_wd_group'] is not None:\n assert isinstance(training_args['no_wd_group'], list)\n def needs_wd(name):\n return all(x not in name for x in training_args['no_wd_group'])\n parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n model_params = [\n {'params': parameters_with_wd},\n {'params': parameters_without_wd, 'weight_decay': 0.0},\n ]\n else:\n model_params = model.parameters()\n optimizer = make_optimizer(\n training_args['optimizer'],\n model_params,\n training_args['lr'],\n training_args['weight_decay'],\n )\n # scheduler\n if training_args['scheduler'] is not None:\n scheduler = None\n else:\n scheduler = None\n\n return optimizer, scheduler\n \n @staticmethod\n def prepare_tensor_loader(\n X_num: Optional[torch.Tensor],\n X_cat: Optional[torch.Tensor],\n ys: torch.Tensor,\n batch_size: int = 64,\n shuffle: bool = False,\n ):\n assert not all(x is None for x in [X_num, X_cat])\n missing_placeholder = 0 if X_num is None else 1 if X_cat is None else -1\n datas = [x for x in [X_num, X_cat, ys] if x is not None]\n tensor_dataset = TensorDataset(*datas)\n tensor_loader = DataLoader(\n tensor_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n )\n return tensor_loader, missing_placeholder\n \n @staticmethod\n def parse_batch(batch: Tuple[torch.Tensor], missing_idx, device: torch.device):\n if batch[0].device.type != device.type:\n # if batch[0].device != device: # initialize self.device with model.device rather than torch.device()\n # batch = (x.to(device) for x in batch) # generator\n batch = tuple([x.to(device) for x in batch]) # list\n if missing_idx == -1:\n return batch\n else:\n return batch[:missing_idx] + [None,] + batch[missing_idx:]\n \n @staticmethod\n def compute_loss(logits: torch.Tensor, targets: torch.Tensor, task: str, reduction: str = 'mean'):\n loss_fn = {\n 'binclass': F.binary_cross_entropy_with_logits,\n 'multiclass': F.cross_entropy,\n 'regression': F.mse_loss,\n }[task]\n return loss_fn(logits.squeeze(-1), targets, reduction=reduction)\n \n @staticmethod\n def calculate_metric(\n golds,\n predictions,\n task: str,\n prediction_type: Optional[str] = None,\n y_std: Optional[float] = None,\n ):\n \"\"\"Calculate metrics\"\"\"\n 
metric = {\n 'regression': 'rmse', \n 'binclass': 'roc_auc', \n 'multiclass': 'accuracy'\n }[task]\n \n return calculate_metrics(\n golds, predictions,\n task, prediction_type, y_std\n )[metric], metric\n \n def better_result(self, dev_metric, task, is_loss=False):\n if is_loss: # logloss\n best_dev_metric = self.history['val']['best_log_loss']\n if best_dev_metric is None or best_dev_metric > dev_metric:\n self.history['val']['best_log_loss'] = dev_metric\n return True\n else:\n return False\n best_dev_metric = self.history['val']['best_metric']\n if best_dev_metric is None:\n self.history['val']['best_metric'] = dev_metric\n return True\n elif task == 'regression': # rmse\n if best_dev_metric > dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n else:\n if best_dev_metric < dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n \n def early_stop_handler(self, epoch, tot_step, dev_metric, task, patience, save_path):\n if task != 'regression' and self.better_result(dev_metric['log_loss'], task, is_loss=True):\n # record best logloss\n torch.save(self.model.state_dict(), Path(save_path) / 'best-logloss.bin')\n if self.better_result(dev_metric['metric'], task):\n print('<<< Best Dev Result', end='')\n torch.save(self.model.state_dict(), Path(save_path) / 'best.bin')\n self.no_improvement = 0\n self.history['val']['best_epoch'] = epoch\n self.history['val']['best_step'] = tot_step\n else:\n self.no_improvement += 1\n print(f'| [no improvement] {self.no_improvement}', end='')\n if patience <= 0:\n return False\n else:\n return self.no_improvement >= patience\n \n def save_evaluate_dnn(\n self, \n # print and saved infos\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n # evaluate infos\n task, patience, save_path,\n dev_loader, y_std\n ):\n \"\"\"For DNN models\"\"\"\n epoch, step = tot_step // steps_per_epoch, (tot_step - 1) % steps_per_epoch + 1\n avg_loss = tot_loss / step\n self.history['train']['loss'].append(avg_loss)\n self.history['train']['tot_time'] = tot_time\n self.history['train']['avg_step_time'] = tot_time / tot_step\n self.history['train']['avg_epoch_time'] = self.history['train']['avg_step_time'] * steps_per_epoch\n print(f\"[epoch] {epoch} | [step] {step} | [tot_step] {tot_step} | [used time] {tot_time:.4g} | [train_loss] {avg_loss:.4g} \", end='')\n if dev_loader is not None:\n _, results = self.predict(dev_loader=dev_loader, y_std=y_std, task=task, return_metric=True)\n dev_metric, metric_name = results['metric']\n print(f\"| [{metric_name}] {dev_metric:.4g} \", end='')\n if task != 'regression':\n print(f\"| [log-loss] {results['log_loss']:.4g} \", end='')\n self.history['val']['log_loss'].append(results['log_loss'])\n self.history['val']['metric_name'] = metric_name\n self.history['val']['metric'].append(dev_metric)\n self.history['val']['tot_time'] += results['time']\n self.history['val']['avg_step_time'] = self.history['val']['tot_time'] / tot_step\n self.history['val']['avg_epoch_time'] = self.history['val']['avg_step_time'] * steps_per_epoch\n dev_metric = {'metric': dev_metric, 'log_loss': results['log_loss']}\n if self.early_stop_handler(epoch, tot_step, dev_metric, task, patience, save_path):\n print(' <<< Early Stop')\n return True\n print()\n return False\n \n def load_best_dnn(self, save_path, file='best'):\n model_file = Path(save_path) / f\"{file}.bin\"\n if not os.path.exists(model_file):\n print(f'There is no {file} checkpoint, loading the last one...')\n 
model_file = Path(save_path) / 'final.bin'\n else:\n print(f'Loading {file} model...')\n self.model.load_state_dict(torch.load(model_file))\n print('successfully')" }, { "identifier": "check_dir", "path": "models/abstract.py", "snippet": "def check_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)" }, { "identifier": "Dataset", "path": "data/utils.py", "snippet": "class Dataset:\n X_num: Optional[ArrayDict]\n X_cat: Optional[ArrayDict]\n y: ArrayDict\n y_info: Dict[str, Any]\n task_type: TaskType\n n_classes: Optional[int]\n name: Optional[str]\n\n @classmethod\n def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset':\n dir_ = Path(dir_)\n\n def load(item) -> ArrayDict:\n def _load(file: Path):\n return cast(np.ndarray, np.load(file)) if file.exists() else None\n return {\n x: _load(dir_ / f'{item}_{x}.npy')\n for x in ['train', 'val', 'test']\n }\n\n info = load_json(dir_ / 'info.json')\n\n return Dataset(\n load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None,\n load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None,\n load('y'),\n {},\n TaskType(info['task_type']),\n info.get('n_classes'),\n info.get('name'),\n )\n\n @property\n def is_binclass(self) -> bool:\n return self.task_type == TaskType.BINCLASS\n\n @property\n def is_multiclass(self) -> bool:\n return self.task_type == TaskType.MULTICLASS\n\n @property\n def is_regression(self) -> bool:\n return self.task_type == TaskType.REGRESSION\n\n @property\n def n_num_features(self) -> int:\n return 0 if self.X_num is None else self.X_num['train'].shape[1]\n\n @property\n def n_cat_features(self) -> int:\n return 0 if self.X_cat is None else self.X_cat['train'].shape[1]\n\n @property\n def n_features(self) -> int:\n return self.n_num_features + self.n_cat_features\n\n def size(self, part: Optional[str]) -> int:\n return sum(map(len, self.y.values())) if part is None else len(self.y[part])\n\n @property\n def nn_output_dim(self) -> int:\n if self.is_multiclass:\n assert self.n_classes is not None\n return self.n_classes\n else:\n return 1\n\n def get_category_sizes(self, part: str) -> List[int]:\n return [] if self.X_cat is None else get_category_sizes(self.X_cat[part])" }, { "identifier": "DataProcessor", "path": "data/processor.py", "snippet": "class DataProcessor:\n \"\"\"Base class to process a single dataset\"\"\"\n def __init__(\n self, \n normalization: Optional[Normalization] = None,\n num_nan_policy: Optional[NumNanPolicy] = None,\n cat_nan_policy: Optional[CatNanPolicy] = None,\n cat_min_frequency: Optional[float] = None,\n cat_encoding: Optional[CatEncoding] = None,\n y_policy: Optional[YPolicy] = 'default',\n seed: int = 42,\n cache_dir: Optional[str] = None,\n ):\n self.transformation = Transformations(\n seed=seed, \n normalization=normalization, \n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n y_policy=y_policy\n )\n self.cache_dir = cache_dir\n \n def apply(self, dataset: Dataset):\n return transform_dataset(dataset, self.transformation, self.cache_dir)\n \n def save(self, file, **kwargs):\n data_config = {\n 'transformation': vars(self.transformation),\n 'cache_dir': str(self.cache_dir),\n 'meta': kwargs,\n }\n with open(file, 'w') as f:\n yaml.dump(data_config, f, indent=2)\n \n @staticmethod\n def check_splits(dataset: Dataset):\n valid_splits = True\n if 'train' in dataset.y:\n if 'test' not in dataset.y:\n warnings.warn(\"Missing test split, unable to prediction\")\n valid_splits = False\n if 
'val' not in dataset.y:\n warnings.warn(\"Missing dev split, unable to early stop, or ignore this message if no early stop needed.\")\n valid_splits = False\n if valid_splits:\n print(\"ready for training!\")\n else:\n raise ValueError(\"Missing training split in the dataset\")\n \n @staticmethod\n def prepare(dataset: Dataset, model: Optional[TabModel] = None, device: str = 'cuda'):\n assert model is not None or device is not None\n def get_spl(X: Optional[Union[ArrayDict, TensorDict]], spl):\n return None if X is None else X[spl]\n if device is not None or isinstance(model.model, nn.Module):\n device = device or model.model.device\n X_num, X_cat, ys = prepare_tensors(dataset, device)\n return {spl: (\n get_spl(X_num, spl), \n get_spl(X_cat, spl), \n get_spl(ys, spl)\n ) for spl in ys}\n else:\n return {spl: (\n get_spl(dataset.X_num, spl), \n get_spl(dataset.X_cat, spl), \n get_spl(dataset.y, spl)\n ) for spl in dataset.y}\n \n @staticmethod\n def load_preproc_default(\n output_dir, # output preprocessing infos\n model_name, \n dataset_name, \n benchmark_name: Optional[str] = None, \n seed: int = 42, \n cache_dir: Optional[str] = None\n ):\n global DATASETS, CUSTOM_DATASETS\n \"\"\"default data preprocessing pipeline\"\"\"\n if dataset_name in DATASETS or dataset_name in CUSTOM_DATASETS:\n data_src = DATASETS if dataset_name in DATASETS else CUSTOM_DATASETS\n data_config = data_src[dataset_name]\n data_path = Path(data_config['path'])\n data_config.setdefault('normalization', 'quantile')\n normalization = data_config['normalization']\n elif benchmark_name is not None:\n assert benchmark_name in BENCHMARKS, f\"Benchmark '{benchmark_name}' is not included, \\\n please choose one of '{list(BENCHMARKS.keys())}', for include your benchmark manually.\"\n benchmark_info = BENCHMARKS[benchmark_name]\n assert dataset_name in benchmark_info['datasets'], f\"dataset '{dataset_name}' not in benchmark '{benchmark_name}'\"\n data_path = Path(benchmark_info['path']) / dataset_name\n normalization = 'quantile'\n else:\n raise ValueError(f\"No dataset '{dataset_name}' is available, \\\n if you want to use a custom dataset (from csv file), using `add_custom_dataset`\")\n \n dataset = Dataset.from_dir(data_path)\n # default preprocess settings\n num_nan_policy = 'mean' if dataset.X_num is not None and \\\n any(np.isnan(dataset.X_num[spl]).any() for spl in dataset.X_num) else None\n cat_nan_policy = None\n if model_name in ['xgboost', 'catboost', 'lightgbm']: # for tree models or other sklearn algorithms\n normalization = None\n cat_min_frequency = None\n cat_encoding = 'one-hot'\n if model_name in ['catboost']:\n cat_encoding = None\n else: # for dnns\n # BUG: (dataset.X_cat[spl] == CAT_MISSING_VALUE).any() has different action\n # dtype: int -> bool, dtype: string -> array[bool], dtype: object -> np.load error\n # CURRENT: uniformly using string type to store catgorical features\n if dataset.X_cat is not None and \\\n any((dataset.X_cat[spl] == CAT_MISSING_VALUE).any() for spl in dataset.X_cat):\n cat_nan_policy = 'most_frequent'\n cat_min_frequency = None\n cat_encoding = None\n cache_dir = cache_dir or data_path\n processor = DataProcessor(\n normalization=normalization,\n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n seed=seed,\n cache_dir=Path(cache_dir),\n )\n dataset = processor.apply(dataset)\n # check train, val, test splits\n DataProcessor.check_splits(dataset)\n # save preprocessing infos\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n processor.save(\n Path(output_dir) / 'data_config.yaml',\n benchmark=str(benchmark_name),\n dataset=dataset_name\n )\n return dataset\n\n @staticmethod\n def split(\n X_num: Optional[np.ndarray] = None, \n X_cat: Optional[np.ndarray] = None, \n ys: np.ndarray = None, \n train_ratio: float = 0.8,\n stratify: bool = True,\n seed: int = 42,\n ):\n assert 0 < train_ratio < 1\n assert ys is not None\n sample_idx = np.arange(len(ys))\n test_ratio = 1 - train_ratio\n _stratify = None if not stratify else ys\n train_idx, test_idx = train_test_split(sample_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n _stratify = None if not stratify else ys[train_idx]\n train_idx, val_idx = train_test_split(train_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n if X_num is not None:\n X_num = {'train': X_num[train_idx], 'val': X_num[val_idx], 'test': X_num[test_idx]}\n if X_cat is not None:\n X_cat = {'train': X_cat[train_idx], 'val': X_cat[val_idx], 'test': X_cat[test_idx]}\n ys = {'train': ys[train_idx], 'val': ys[val_idx], 'test': ys[test_idx]}\n idx = {'train': train_idx, 'val': val_idx, 'test': test_idx}\n return X_num, X_cat, ys, idx\n \n @staticmethod\n def del_custom_dataset(\n dataset_names: Union[str, List[str]]\n ):\n global DATASETS, CUSTOM_DATASETS\n all_infos = read_custom_infos()\n if isinstance(dataset_names, str):\n dataset_names = [dataset_names]\n for dataset_name in dataset_names:\n if dataset_name not in CUSTOM_DATASETS:\n print(f\"custom dataset: {dataset_name} not exist\")\n continue\n elif dataset_name in DATASETS:\n print(f\"can not delete an in-built dataset: {dataset_name}\")\n continue\n data_info = CUSTOM_DATASETS[dataset_name]\n task = data_info['task_type']\n data_path = data_info['path']\n data_idx = [info['name'] for info in all_infos['data_list']].index(dataset_name)\n all_infos['data_list'].pop(data_idx)\n all_infos['n_datasets'] -= 1\n all_infos[task] -= 1\n shutil.rmtree(data_path)\n print(f\"delete dataset: {dataset_name} successfully\")\n write_custom_infos(all_infos)\n from .env import CUSTOM_DATASETS # BUG: refresh the global variable\n\n @staticmethod\n def add_custom_dataset(\n file: Union[str, Path],\n format: DataFileType = 'csv',\n dataset_name: Optional[str] = None,\n task: Optional[str] = None,\n num_cols: Optional[List[int]] = None,\n cat_cols: Optional[List[int]] = None,\n label_index: int = -1, # label column index\n header: Optional[int] = 0, # header row\n max_cat_num: int = 16,\n train_ratio: float = 0.8, # split train / test, train / val\n seed: float = 42, # random split seed\n ):\n \"\"\"\n Support for adding a custom dataset from a single data file\n ---\n read a raw csv file, process into 3 splits (train, val, test), and add to custom_datasets\n\n TODO: adding a dataset from prepared data split files \n TODO: support no validation split\n \"\"\"\n global DATASETS, CUSTOM_DATASETS\n file_name = Path(file).name\n assert file_name.endswith(format), f'please check if the file \\\n is in {format} format, or add the suffix manually'\n dataset_name = dataset_name or file_name[:-len(format)-1]\n assert dataset_name not in DATASETS, f'same dataset name as an in-built dataset: {dataset_name}'\n assert dataset_name not in CUSTOM_DATASETS, f\"existing custom dataset '{dataset_name}' found\"\n \n if format == 'csv':\n datas: pd.DataFrame = pd.read_csv(file, header=header)\n columns = datas.columns if header is not None else None\n elif format == 'npy':\n header = None # 
numpy file has no headers\n columns = None\n datas = np.load(file)\n raise NotImplementedError(\"only support load csv file now\")\n else:\n raise ValueError(\"other support format to be add further\")\n \n X_idx = list(range(datas.shape[1]))\n y_idx = X_idx.pop(label_index)\n label_name = columns[y_idx] if columns is not None else None\n # numerical and categorical feature detection\n if num_cols is None or cat_cols is None:\n print('automatically detect column type...')\n print('max category amount: ', max_cat_num)\n num_cols, cat_cols = [], []\n num_names, cat_names = [], []\n for i in X_idx:\n if datas.iloc[:, i].values.dtype == float:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n else: # int or object (str)\n if len(set(datas.iloc[:, i].values)) <= max_cat_num:\n cat_cols.append(i)\n if columns is not None:\n cat_names.append(columns[i])\n elif datas.iloc[:, i].values.dtype == int:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n if not num_names and not cat_names:\n num_names, cat_names = None, None\n elif columns:\n num_names = [columns[i] for i in num_cols]\n cat_names = [columns[i] for i in cat_cols]\n else:\n num_names, cat_names = None, None\n n_num_features = len(num_cols)\n n_cat_features = len(cat_cols)\n # build X_num and X_cat\n X_num, ys = None, datas.iloc[:, y_idx].values\n if len(num_cols) > 0:\n X_num = datas.iloc[:, num_cols].values.astype(np.float32)\n # check data type\n X_cat = []\n for i in cat_cols:\n if datas.iloc[:, i].values.dtype == int:\n x = datas.iloc[:, i].values.astype(np.int64)\n # ordered by value\n # x = OrdinalEncoder(categories=[sorted(list(set(x)))]).fit_transform(x.reshape(-1, 1))\n else: # string object\n x = datas.iloc[:, i].values.astype(object)\n # most_common = [item[0] for item in Counter(x).most_common()]\n # ordered by frequency\n # x = OrdinalEncoder(categories=[most_common]).fit_transform(x.reshape(-1, 1))\n X_cat.append(x.astype(np.str0)) # Encoder Later, compatible with Line 140\n X_cat = np.stack(X_cat, axis=1) if len(X_cat) > 0 else None # if using OrdinalEncoder, np.concatenate\n # detect task type\n def process_non_regression_labels(ys: np.ndarray, task):\n if ys.dtype in [int, float]:\n ys = OrdinalEncoder(categories=[sorted(list(set(ys)))]).fit_transform(ys.reshape(-1, 1))\n else:\n most_common = [item[0] for item in Counter(ys).most_common()]\n ys = OrdinalEncoder(categories=most_common).fit_transform(ys.reshape(-1, 1))\n ys = ys[:, 0]\n return ys.astype(np.float32) if task == 'binclass' else ys.astype(np.int64)\n \n if task is None:\n if ys.dtype in [int, object]:\n task = 'binclass' if len(set(ys)) == 2 else 'multiclass'\n ys = process_non_regression_labels(ys, task)\n elif ys.dtype == float:\n if len(set(ys)) == 2:\n task = 'binclass'\n ys = process_non_regression_labels(ys, task)\n else:\n task = 'regression'\n ys = ys.astype(np.float32)\n else:\n if task == 'regression':\n ys = ys.astype(np.float32)\n else:\n ys = process_non_regression_labels(ys, task)\n\n # split datasets\n stratify = task != 'regression'\n X_num, X_cat, ys, idx = DataProcessor.split(X_num, X_cat, ys, train_ratio, stratify, seed)\n # push to CUSTOM_DATASETS\n data_info = {\n 'name': dataset_name,\n 'id': f'{dataset_name.lower()}--custom',\n 'task_type': task,\n 'label_name': label_name,\n 'n_num_features': n_num_features,\n 'num_feature_names': num_names,\n 'n_cat_features': n_cat_features,\n 'cat_feature_names': cat_names,\n 'test_size': len(ys['test']),\n 'train_size': len(ys['train']),\n 
'val_size': len(ys['val'])}\n push_custom_datasets(X_num, X_cat, ys, idx, data_info)\n from .env import CUSTOM_DATASETS # refresh global variable\n print(f'finish, now you can load your dataset with `load_preproc_default({dataset_name})`')" } ]
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
15,498
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda',
) -> TabModel:
5
2023-10-30 14:55:44+00:00
24k
hyperspy/exspy
exspy/signals/eds_tem.py
[ { "identifier": "EDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class EDSSpectrum(Signal1D):\n \"\"\"General signal class for EDS spectra.\"\"\"\n\n _signal_type = \"EDS\"\n\n def __init__(self, *args, **kwards):\n super().__init__(*args, **kwards)\n if self.metadata.Signal.signal_type == \"EDS\":\n warnings.warn(\n \"The microscope type is not set. Use \"\n \"set_signal_type('EDS_TEM') \"\n \"or set_signal_type('EDS_SEM')\"\n )\n self.axes_manager.signal_axes[0].is_binned = True\n self._xray_markers = {}\n\n def _get_line_energy(self, Xray_line, FWHM_MnKa=None):\n \"\"\"\n Get the line energy and the energy resolution of a Xray line.\n\n The return values are in the same units than the signal axis\n\n Parameters\n ----------\n Xray_line : strings\n Valid element X-ray lines e.g. Fe_Kb\n FWHM_MnKa: {None, float, 'auto'}\n The energy resolution of the detector in eV\n if 'auto', used the one in\n 'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'\n\n Returns\n -------\n float: the line energy, if FWHM_MnKa is None\n (float,float): the line energy and the energy resolution, if FWHM_MnKa\n is not None\n \"\"\"\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if FWHM_MnKa == \"auto\":\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa\n )\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa\n )\n else:\n raise NotImplementedError(\n \"This method only works for EDS_TEM or EDS_SEM signals. \"\n \"You can use `set_signal_type('EDS_TEM')` or\"\n \"`set_signal_type('EDS_SEM')` to convert to one of these\"\n \"signal types.\"\n )\n line_energy = utils_eds._get_energy_xray_line(Xray_line)\n if units_name == \"eV\":\n line_energy *= 1000\n if FWHM_MnKa is not None:\n line_FWHM = (\n utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000\n )\n elif units_name == \"keV\":\n if FWHM_MnKa is not None:\n line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy)\n else:\n raise ValueError(\n f\"{units_name} is not a valid units for the energy axis. \"\n \"Only `eV` and `keV` are supported. \"\n \"If `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.axes_manager.signal_axes[0].units = 'keV' \\n\"\n )\n if FWHM_MnKa is None:\n return line_energy\n else:\n return line_energy, line_FWHM\n\n def _get_beam_energy(self):\n \"\"\"\n Get the beam energy.\n\n The return value is in the same units than the signal axis\n \"\"\"\n\n if \"Acquisition_instrument.SEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy\n elif \"Acquisition_instrument.TEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy\n else:\n raise AttributeError(\n \"The beam energy is not defined in `metadata`. 
\"\n \"Use `set_microscope_parameters` to set it.\"\n )\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if units_name == \"eV\":\n beam_energy *= 1000\n return beam_energy\n\n def _get_xray_lines_in_spectral_range(self, xray_lines):\n \"\"\"\n Return the lines in the energy range\n\n Parameters\n ----------\n xray_lines: List of string\n The xray_lines\n\n Return\n ------\n The list of xray_lines in the energy range\n \"\"\"\n ax = self.axes_manager.signal_axes[0]\n low_value = ax.low_value\n high_value = ax.high_value\n try:\n if self._get_beam_energy() < high_value:\n high_value = self._get_beam_energy()\n except AttributeError:\n # in case the beam energy is not defined in the metadata\n pass\n xray_lines_in_range = []\n xray_lines_not_in_range = []\n for xray_line in xray_lines:\n line_energy = self._get_line_energy(xray_line)\n if low_value < line_energy < high_value:\n xray_lines_in_range.append(xray_line)\n else:\n xray_lines_not_in_range.append(xray_line)\n return xray_lines_in_range, xray_lines_not_in_range\n\n def sum(self, axis=None, out=None, rechunk=False):\n if axis is None:\n axis = self.axes_manager.navigation_axes\n s = super().sum(axis=axis, out=out, rechunk=rechunk)\n s = out or s\n\n # Update live time by the change in navigation axes dimensions\n time_factor = np.prod(\n [ax.size for ax in self.axes_manager.navigation_axes]\n ) / np.prod([ax.size for ax in s.axes_manager.navigation_axes])\n aimd = s.metadata.get_item(\"Acquisition_instrument\", None)\n if aimd is not None:\n aimd = s.metadata.Acquisition_instrument\n if \"SEM.Detector.EDS.live_time\" in aimd:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"TEM.Detector.EDS.live_time\" in aimd:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and \"\n \"has not been updated.\"\n )\n\n if out is None:\n return s\n\n sum.__doc__ = Signal1D.sum.__doc__\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape,\n scale=scale,\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n aimd = m.metadata.Acquisition_instrument\n if \"Acquisition_instrument.SEM.Detector.EDS.real_time\" in m.metadata:\n aimd.SEM.Detector.EDS.real_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.real_time\" in m.metadata:\n aimd.TEM.Detector.EDS.real_time *= time_factor\n else:\n _logger.info(\n \"real_time could not be found in the metadata and has not been updated.\"\n )\n if \"Acquisition_instrument.SEM.Detector.EDS.live_time\" in m.metadata:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.live_time\" in m.metadata:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and has not been updated.\"\n )\n\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = BaseSignal.rebin.__doc__\n\n def set_elements(self, elements):\n \"\"\"Erase all elements and set them.\n\n Parameters\n ----------\n elements : list of strings\n A list of chemical element symbols.\n\n See also\n --------\n add_elements, set_lines, add_lines\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> 
print(s.metadata.Sample.elements)\n >>> s.set_elements(['Al'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al']\n\n \"\"\"\n # Erase previous elements and X-ray lines\n if \"Sample.elements\" in self.metadata:\n del self.metadata.Sample.elements\n self.add_elements(elements)\n\n def add_elements(self, elements):\n \"\"\"Add elements and the corresponding X-ray lines.\n\n The list of elements is stored in `metadata.Sample.elements`\n\n Parameters\n ----------\n elements : list of strings\n The symbol of the elements.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> print(s.metadata.Sample.elements)\n >>> s.add_elements(['Ar'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']\n\n See also\n --------\n set_elements, add_lines, set_lines\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a list. For example, \"\n \"if `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n if \"Sample.elements\" in self.metadata:\n elements_ = set(self.metadata.Sample.elements)\n else:\n elements_ = set()\n for element in elements:\n if element in elements_db:\n elements_.add(element)\n else:\n raise ValueError(f\"{element} is not a valid chemical element symbol.\")\n self.metadata.set_item(\"Sample.elements\", sorted(list(elements_)))\n\n def _get_xray_lines(self, xray_lines=None, only_one=None, only_lines=(\"a\",)):\n if xray_lines is None:\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n raise ValueError(\"Not X-ray line, set them with `add_elements`.\")\n return xray_lines\n\n def set_lines(self, lines, only_one=True, only_lines=(\"a\",)):\n \"\"\"Erase all Xrays lines and set them.\n\n See add_lines for details.\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. 
If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.set_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']\n\n See also\n --------\n add_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n del self.metadata.Sample.xray_lines\n self.add_lines(lines=lines, only_one=only_one, only_lines=only_lines)\n\n def add_lines(self, lines=(), only_one=True, only_lines=(\"a\",)):\n \"\"\"Add X-rays lines to the internal list.\n\n Although most functions do not require an internal list of\n X-ray lines because they can be calculated from the internal\n list of elements, ocassionally it might be useful to customize the\n X-ray lines to be use by all functions by default using this method.\n The list of X-ray lines is stored in\n `metadata.Sample.xray_lines`\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list. If the list is empty (default), and\n `metadata.Sample.elements` is\n defined, add the lines of all those elements.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_microscope_parameters(beam_energy=30)\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.add_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n See also\n --------\n set_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = set(self.metadata.Sample.xray_lines)\n else:\n xray_lines = set()\n # Define the elements which Xray lines has been customized\n # So that we don't attempt to add new lines automatically\n elements = set()\n for line in xray_lines:\n elements.add(line.split(\"_\")[0])\n for line in lines:\n try:\n element, subshell = line.split(\"_\")\n except ValueError:\n raise ValueError(\n \"Invalid line symbol. \"\n \"Please provide a valid line symbol e.g. 
Fe_Ka\"\n )\n if element in elements_db:\n elements.add(element)\n if subshell in elements_db[element][\"Atomic_properties\"][\"Xray_lines\"]:\n lines_len = len(xray_lines)\n xray_lines.add(line)\n if lines_len != len(xray_lines):\n _logger.info(f\"{line} line added,\")\n else:\n _logger.info(f\"{line} line already in.\")\n else:\n raise ValueError(f\"{line} is not a valid line of {element}.\")\n else:\n raise ValueError(f\"{element} is not a valid symbol of an element.\")\n xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]\n for xray in xray_not_here:\n warnings.warn(f\"{xray} is not in the data energy range.\", UserWarning)\n if \"Sample.elements\" in self.metadata:\n extra_elements = set(self.metadata.Sample.elements) - elements\n if extra_elements:\n new_lines = self._get_lines_from_elements(\n extra_elements, only_one=only_one, only_lines=only_lines\n )\n if new_lines:\n self.add_lines(list(new_lines) + list(lines))\n self.add_elements(elements)\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = xray_lines.union(self.metadata.Sample.xray_lines)\n self.metadata.Sample.xray_lines = sorted(list(xray_lines))\n\n def _get_lines_from_elements(self, elements, only_one=False, only_lines=(\"a\",)):\n \"\"\"Returns the X-ray lines of the given elements in spectral range\n of the data.\n\n Parameters\n ----------\n elements : list of strings\n A list containing the symbol of the chemical elements.\n only_one : bool\n If False, add all the lines of each element in the data spectral\n range. If True only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be returned.\n\n Returns\n -------\n list of X-ray lines alphabetically sorted\n\n \"\"\"\n\n only_lines = utils_eds._parse_only_lines(only_lines)\n try:\n beam_energy = self._get_beam_energy()\n except BaseException:\n # Fall back to the high_value of the energy axis\n beam_energy = self.axes_manager.signal_axes[0].high_value\n lines = []\n elements = [el if isinstance(el, str) else el.decode() for el in elements]\n for element in elements:\n # Possible line (existing and excited by electron)\n element_lines = []\n for subshell in list(\n elements_db[element][\"Atomic_properties\"][\"Xray_lines\"].keys()\n ):\n if only_lines and subshell not in only_lines:\n continue\n element_lines.append(element + \"_\" + subshell)\n element_lines = self._get_xray_lines_in_spectral_range(element_lines)[0]\n if only_one and element_lines:\n # Choose the best line\n select_this = -1\n element_lines.sort()\n for i, line in enumerate(element_lines):\n if self._get_line_energy(line) < beam_energy / 2:\n select_this = i\n break\n element_lines = [\n element_lines[select_this],\n ]\n\n if not element_lines:\n _logger.info(\n f\"There is no X-ray line for element {element} \"\n \"in the data spectral range\"\n )\n else:\n lines.extend(element_lines)\n lines.sort()\n return lines\n\n def _parse_xray_lines(self, xray_lines, only_one, only_lines):\n only_lines = utils_eds._parse_only_lines(only_lines)\n xray_lines = self._get_xray_lines(\n xray_lines, only_one=only_one, only_lines=only_lines\n )\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)\n for xray in xray_not_here:\n warnings.warn(\n f\"{xray} is not in the data energy range. 
\"\n \"You can remove it with: \"\n f\"`s.metadata.Sample.xray_lines.remove('{xray}')`\"\n )\n return xray_lines\n\n def get_lines_intensity(\n self,\n xray_lines=None,\n integration_windows=2.0,\n background_windows=None,\n plot_result=False,\n only_one=True,\n only_lines=(\"a\",),\n **kwargs,\n ):\n \"\"\"Return the intensity map of selected Xray lines.\n\n The intensities, the number of X-ray counts, are computed by\n suming the spectrum over the\n different X-ray lines. The sum window width\n is calculated from the energy resolution of the detector\n as defined in 'energy_resolution_MnKa' of the metadata.\n Backgrounds average in provided windows can be subtracted from the\n intensities.\n\n Parameters\n ----------\n xray_lines: {None, Iterable* of strings}\n If None,\n if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those.\n If `metadata.Sample.elements.xray_lines` is undefined\n or empty but `metadata.Sample.elements` is defined,\n use the same syntax as `add_line` to select a subset of lines\n for the operation.\n Alternatively, provide an iterable containing\n a list of valid X-ray lines symbols.\n * Note that while dictionaries and strings are iterable,\n their use is ambiguous and specifically not allowed.\n integration_windows: Float or array\n If float, the width of the integration windows is the\n 'integration_windows_width' times the calculated FWHM of the line.\n Else provide an array for which each row corresponds to a X-ray\n line. Each row contains the left and right value of the window.\n background_windows: None or 2D array of float\n If None, no background subtraction. Else, the backgrounds average\n in the windows are subtracted from the return intensities.\n 'background_windows' provides the position of the windows in\n energy. Each line corresponds to a X-ray line. In a line, the two\n first values correspond to the limits of the left window and the\n two last values correspond to the limits of the right window.\n plot_result : bool\n If True, plot the calculated line intensities. If the current\n object is a single spectrum it prints the result instead.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, use only the given lines.\n kwargs\n The extra keyword arguments for plotting. 
See\n `utils.plot.plot_signals`\n\n Returns\n -------\n intensities : list\n A list containing the intensities as BaseSignal subclasses.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)\n Mn_La at 0.63316 keV : Intensity = 96700.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows=2.1)\n >>> s.get_lines_intensity(['Mn_Ka'],\n >>> integration_windows=2.1, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 53597.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_elements(['Mn'])\n >>> s.set_lines(['Mn_Ka'])\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 46716.00\n\n See also\n --------\n set_elements, add_elements, estimate_background_windows,\n plot\n\n \"\"\"\n if xray_lines is not None and (\n not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict))\n ):\n raise TypeError(\n \"xray_lines must be a compatible iterable, but was \"\n f\"mistakenly provided as a {type(xray_lines)}.\"\n )\n\n xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n intensities = []\n ax = self.axes_manager.signal_axes[0]\n # test Signal1D (0D problem)\n # signal_to_index = self.axes_manager.navigation_dimension - 2\n for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):\n element, line = utils_eds._get_element_and_line(Xray_line)\n line_energy = self._get_line_energy(Xray_line)\n # Replace with `map` function for lazy large datasets\n img = self.isig[window[0] : window[1]].integrate1D(\n -1\n ) # integrate over window.\n if np.issubdtype(img.data.dtype, np.integer):\n # The operations below require a float dtype with the default\n # numpy casting rule ('same_kind')\n img.change_dtype(\"float\")\n if background_windows is not None:\n bw = background_windows[i]\n # TODO: test to prevent slicing bug. To be reomved when fixed\n indexes = [float(ax.value2index(de)) for de in list(bw) + window]\n if indexes[0] == indexes[1]:\n bck1 = self.isig[bw[0]]\n else:\n bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)\n if indexes[2] == indexes[3]:\n bck2 = self.isig[bw[2]]\n else:\n bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)\n corr_factor = (indexes[5] - indexes[4]) / (\n (indexes[1] - indexes[0]) + (indexes[3] - indexes[2])\n )\n img = img - (bck1 + bck2) * corr_factor\n img.metadata.General.title = (\n f\"X-ray line intensity of {self.metadata.General.title}: \"\n f\"{Xray_line} at {line_energy:.2f} \"\n f\"{self.axes_manager.signal_axes[0].units}\"\n )\n img = img.transpose(signal_axes=[])\n if plot_result and img.axes_manager.navigation_size == 1:\n if img._lazy:\n img.compute()\n print(\n f\"{Xray_line} at {line_energy} {ax.units} : \"\n f\"Intensity = {img.data[0]:.2f}\"\n )\n img.metadata.set_item(\"Sample.elements\", ([element]))\n img.metadata.set_item(\"Sample.xray_lines\", ([Xray_line]))\n intensities.append(img)\n if plot_result and img.axes_manager.navigation_size != 1:\n utils.plot.plot_signals(intensities, **kwargs)\n return intensities\n\n def get_take_off_angle(self):\n \"\"\"Calculate the take-off-angle (TOA).\n\n TOA is the angle with which the X-rays leave the surface towards\n the detector. 
Parameters are read in 'SEM.Stage.tilt_alpha',\n 'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and\n 'SEM.Detector.EDS.elevation_angle' and 'SEM.Stage.tilt_beta in\n 'metadata'.\n\n Returns\n -------\n take_off_angle: float\n in Degree\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_take_off_angle()\n 37.0\n >>> s.set_microscope_parameters(tilt_stage=20.)\n >>> s.get_take_off_angle()\n 57.0\n\n See also\n --------\n hs.eds.take_off_angle\n \"\"\"\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n mp = self.metadata.Acquisition_instrument.SEM\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n mp = self.metadata.Acquisition_instrument.TEM\n\n tilt_stage = mp.get_item(\"Stage.tilt_alpha\", None)\n azimuth_angle = mp.get_item(\"Detector.EDS.azimuth_angle\", None)\n elevation_angle = mp.get_item(\"Detector.EDS.elevation_angle\", None)\n beta_tilt = mp.get_item(\"Stage.tilt_beta\", 0.0)\n\n return utils_eds.take_off_angle(\n tilt_stage, azimuth_angle, elevation_angle, beta_tilt\n )\n\n def estimate_integration_windows(self, windows_width=2.0, xray_lines=None):\n \"\"\"\n Estimate a window of integration for each X-ray line.\n\n Parameters\n ----------\n windows_width: float\n The width of the integration windows is the 'windows_width' times\n the calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use 'metadata.Sample.elements.xray_lines'. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n integration_windows: 2D array of float\n The positions of the windows in energy. Each row corresponds to a\n X-ray line. Each row contains the left and right value of the\n window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> iw = s.estimate_integration_windows()\n >>> s.plot(integration_windows=iw)\n >>> s.get_lines_intensity(integration_windows=iw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 3710.00\n Pt_La at 9.4421 keV : Intensity = 15872.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n integration_windows = []\n for Xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa=\"auto\")\n element, line = utils_eds._get_element_and_line(Xray_line)\n det = windows_width * line_FWHM / 2.0\n integration_windows.append([line_energy - det, line_energy + det])\n return integration_windows\n\n def estimate_background_windows(\n self, line_width=[2, 2], windows_width=1, xray_lines=None\n ):\n \"\"\"\n Estimate two windows around each X-ray line containing only the\n background.\n\n Parameters\n ----------\n line_width: list of two floats\n The position of the two windows around the X-ray line is given by\n the `line_width` (left and right) times the calculated FWHM of the\n line.\n windows_width: float\n The width of the windows is is the `windows_width` times the\n calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use `metadata.Sample.elements.xray_lines`. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray line. 
In a line, the two first values correspond to the\n limits of the left window and the two last values correspond to\n the limits of the right window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 2754.00\n Pt_La at 9.4421 keV : Intensity = 15090.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n windows_position = []\n for xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(xray_line, FWHM_MnKa=\"auto\")\n tmp = [\n line_energy - line_FWHM * line_width[0] - line_FWHM * windows_width,\n line_energy - line_FWHM * line_width[0],\n line_energy + line_FWHM * line_width[1],\n line_energy + line_FWHM * line_width[1] + line_FWHM * windows_width,\n ]\n windows_position.append(tmp)\n windows_position = np.array(windows_position)\n # merge ovelapping windows\n index = windows_position.argsort(axis=0)[:, 0]\n for i in range(len(index) - 1):\n ia, ib = index[i], index[i + 1]\n if windows_position[ia, 2] > windows_position[ib, 0]:\n interv = np.append(windows_position[ia, :2], windows_position[ib, 2:])\n windows_position[ia] = interv\n windows_position[ib] = interv\n return windows_position\n\n def plot(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n navigator=\"auto\",\n plot_markers=True,\n autoscale=\"v\",\n norm=\"auto\",\n axes_manager=None,\n navigator_kwds={},\n **kwargs,\n ):\n \"\"\"Plot the EDS spectrum. The following markers can be added\n\n - The position of the X-ray lines and their names.\n - The background windows associated with each X-ray lines. A black line\n links the left and right window with the average value in each window.\n\n Parameters\n ----------\n xray_lines: {False, True, 'from_elements', list of string}\n If not False, indicate the position and the name of the X-ray\n lines.\n If True, if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those. If `metadata.Sample.elements.xray_lines`\n is undefined or empty or if xray_lines equals 'from_elements' and\n `metadata.Sample.elements` is defined, use the same syntax as\n `add_line` to select a subset of lines for the operation.\n Alternatively, provide an iterable containing a list of valid X-ray\n lines symbols.\n only_lines : None or list of strings\n If not None, use only the given lines (eg. ('a','Kb')).\n If None, use all lines.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n background_windows: None or 2D array of float\n If not None, add markers at the position of the windows in energy.\n Each line corresponds to a X-ray lines. In a line, the two first\n value corresponds to the limit of the left window and the two\n last values corresponds to the limit of the right window.\n integration_windows: None or 'auto' or float or 2D array of float\n If not None, add markers at the position of the integration\n windows.\n If 'auto' (or float), the width of the integration windows is 2.0\n (or float) times the calculated FWHM of the line. see\n 'estimate_integration_windows'.\n Else provide an array for which each row corresponds to a X-ray\n line. 
Each row contains the left and right value of the window.\n %s\n %s\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot()\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(True)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows='auto')\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw, integration_windows=2.1)\n\n See also\n --------\n set_elements, add_elements, estimate_integration_windows,\n get_lines_intensity, estimate_background_windows\n \"\"\"\n super().plot(\n navigator=navigator,\n plot_markers=plot_markers,\n autoscale=autoscale,\n norm=norm,\n axes_manager=axes_manager,\n navigator_kwds=navigator_kwds,\n **kwargs,\n )\n self._plot_xray_lines(\n xray_lines,\n only_lines,\n only_one,\n background_windows,\n integration_windows,\n render_figure=False,\n )\n self._render_figure(plot=[\"signal_plot\"])\n\n plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)\n\n def _plot_xray_lines(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n render_figure=True,\n ):\n if (\n xray_lines is not False\n or background_windows is not None\n or integration_windows is not None\n ):\n if xray_lines is False:\n xray_lines = True\n only_lines = utils_eds._parse_only_lines(only_lines)\n if xray_lines is True or xray_lines == \"from_elements\":\n if (\n \"Sample.xray_lines\" in self.metadata\n and xray_lines != \"from_elements\"\n ):\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n _logger.warning(\"No elements defined, set them with `add_elements`\")\n # No X-rays lines, nothing to do then\n return\n\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(\n xray_lines\n )\n for xray in xray_not_here:\n _logger.warning(f\"{xray} is not in the data energy range.\")\n\n xray_lines = np.unique(xray_lines)\n\n self.add_xray_lines_markers(xray_lines, render_figure=False)\n if background_windows is not None:\n self._add_background_windows_markers(\n background_windows, render_figure=False\n )\n if integration_windows is not None:\n if integration_windows == \"auto\":\n integration_windows = 2.0\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n self._add_vertical_lines_groups(\n integration_windows, linestyle=\"--\", render_figure=False\n )\n # Render figure only at the end\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_vertical_lines_groups(self, position, render_figure=True, **kwargs):\n \"\"\"\n Add vertical markers for each group that shares the color.\n\n Parameters\n ----------\n position: 2D array of float\n The position on the signal axis. 
Each row corresponds to a\n group.\n kwargs\n keywords argument for :py:class:`~.api.plot.markers.VerticalLine`\n \"\"\"\n colors = itertools.cycle(\n np.sort(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n\n for x, color in zip(position, colors):\n line = VerticalLines(offsets=x, color=color, **kwargs)\n self.add_marker(line, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def add_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Add marker on a spec.plot() with the name of the selected X-ray\n lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines\n \"\"\"\n if self._plot is None or not self._plot.is_active:\n raise RuntimeError(\"The signal needs to be plotted.\")\n norm = self._plot.signal_plot.ax_lines[0].norm\n minimum_intensity = self.data[self.data > 0].min() if norm == \"log\" else 0\n line_names = []\n segments = np.empty((len(xray_lines), 2, 2))\n offsets = np.empty((len(xray_lines), 2))\n # might want to set the intensity based on the alpha line intensity\n for i, xray_line in enumerate(xray_lines):\n element, line = utils_eds._get_element_and_line(xray_line)\n relative_factor = elements_db[element][\"Atomic_properties\"][\"Xray_lines\"][\n line\n ][\"weight\"]\n eng = self._get_line_energy(f\"{element}_{line}\")\n segments[i] = [[eng, 0], [eng, 1]]\n offsets[i] = [eng, 1]\n line_names.append(\n r\"$\\mathrm{%s}_{\\mathrm{%s}}$\"\n % utils_eds._get_element_and_line(xray_line)\n )\n\n line_markers = Lines(\n segments=segments,\n transform=\"relative\",\n color=\"black\",\n )\n text_markers = Texts(\n offsets=offsets,\n texts=line_names,\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.005,\n )\n\n self.add_marker(line_markers, render_figure=False)\n self.add_marker(text_markers, render_figure=False)\n\n # Connect events to remove the markers when the line is closed\n line_markers.events.closed.connect(self._xray_marker_closed)\n text_markers.events.closed.connect(self._xray_marker_closed)\n self._xray_markers[\"lines\"] = line_markers\n self._xray_markers[\"texts\"] = text_markers\n self._xray_markers[\"names\"] = xray_lines\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _xray_marker_closed(self, obj):\n self._xray_markers = {}\n\n def remove_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Remove marker previously added on a spec.plot() with the name of the\n selected X-ray lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines to remove\n render_figure: bool\n If True, render the figure after removing the markers\n \"\"\"\n ind = np.where(np.isin(self._xray_markers[\"names\"], xray_lines))\n self._xray_markers[\"lines\"].remove_items(ind)\n self._xray_markers[\"texts\"].remove_items(ind)\n self._xray_markers[\"names\"] = np.delete(self._xray_markers[\"names\"], ind)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_background_windows_markers(self, windows_position, render_figure=True):\n \"\"\"\n Plot the background windows associated with each X-ray lines.\n\n For X-ray lines, a black line links the left and right window with the\n average value in each window.\n\n Parameters\n ----------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray lines. 
In a line, the two first value corresponds to the\n limit of the left window and the two last values corresponds to the\n limit of the right window.\n\n See also\n --------\n estimate_background_windows, get_lines_intensity\n \"\"\"\n self._add_vertical_lines_groups(windows_position)\n ax = self.axes_manager.signal_axes[0]\n segments = []\n for bw in windows_position:\n # TODO: test to prevent slicing bug. To be removed when fixed\n if ax.value2index(bw[0]) == ax.value2index(bw[1]):\n y1 = self.isig[bw[0]].data\n else:\n y1 = self.isig[bw[0] : bw[1]].mean(-1).data\n if ax.value2index(bw[2]) == ax.value2index(bw[3]):\n y2 = self.isig[bw[2]].data\n else:\n y2 = self.isig[bw[2] : bw[3]].mean(-1).data\n x1 = (bw[0] + bw[1]) / 2.0\n x2 = (bw[2] + bw[3]) / 2.0\n segments.append([[x1, y1[0]], [x2, y2[0]]])\n segments = np.array(segments)\n lines = Lines(segments=segments, color=\"black\")\n self.add_marker(lines, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])" }, { "identifier": "LazyEDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class LazyEDSSpectrum(EDSSpectrum, LazySignal1D):\n \"\"\"Lazy general signal class for EDS spectra.\"\"\"\n\n __doc__ += LAZYSIGNAL_DOC.replace(\"__BASECLASS__\", \"EDSSpectrum\")" }, { "identifier": "preferences", "path": "exspy/_defaults_parser.py", "snippet": "def guess_gos_path():\ndef template2config(template, config):\ndef config2template(template, config):\n def save(self):\nclass EELSConfig(t.HasTraits):\nclass EDSConfig(t.HasTraits):\nclass Preferences(t.HasTraits):\n EELS = t.Instance(EELSConfig)\n EDS = t.Instance(EDSConfig)" }, { "identifier": "material", "path": "exspy/misc/material.py", "snippet": "def __dir__():\ndef _weight_to_atomic(weight_percent, elements):\ndef weight_to_atomic(weight_percent, elements=\"auto\"):\ndef _atomic_to_weight(atomic_percent, elements):\ndef atomic_to_weight(atomic_percent, elements=\"auto\"):\ndef _density_of_mixture(weight_percent, elements, mean=\"harmonic\"):\ndef density_of_mixture(weight_percent, elements=\"auto\", mean=\"harmonic\"):\ndef mass_absorption_coefficient(element, energies):\ndef _mass_absorption_mixture(weight_percent, elements, energies):\ndef mass_absorption_mixture(weight_percent, elements=\"auto\", energies=\"auto\"):\ndef _lines_auto(composition, xray_lines):\ndef _elements_auto(composition, elements):" }, { "identifier": "utils", "path": "exspy/misc/eds/utils.py", "snippet": "_ABSORPTION_CORRECTION_DOCSTRING = \"\"\"absorption_correction : numpy.ndarray or None\n If None (default), absorption correction is ignored, otherwise, the\n array must contain values between 0 and 1 to correct the intensities\n based on estimated absorption.\n\"\"\"\n Z = elements_db[element][\"General_properties\"][\"Z\"]\n A = elements_db[element][\"General_properties\"][\"atomic_weight\"]\ndef _get_element_and_line(xray_line):\ndef _get_energy_xray_line(xray_line):\ndef _get_xray_lines_family(xray_line):\ndef _parse_only_lines(only_lines):\ndef get_xray_lines_near_energy(energy, width=0.2, only_lines=None):\ndef get_FWHM_at_Energy(energy_resolution_MnKa, E):\ndef xray_range(xray_line, beam_energy, density=\"auto\"):\ndef electron_range(element, beam_energy, density=\"auto\", tilt=0):\ndef take_off_angle(tilt_stage, azimuth_angle, elevation_angle, beta_tilt=0.0):\ndef xray_lines_model(\n elements,\n beam_energy=200,\n weight_percents=None,\n energy_resolution_MnKa=130,\n energy_axis=None,\n):\ndef quantification_cliff_lorimer(\n intensities, kfactors, 
absorption_correction=None, mask=None\n):\ndef _quantification_cliff_lorimer(\n intensities, kfactors, absorption_correction, ref_index=0, ref_index2=1\n):\ndef quantification_zeta_factor(intensities, zfactors, dose, absorption_correction=None):\ndef get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle):\ndef quantification_cross_section(\n intensities, cross_sections, dose, absorption_correction=None\n):\ndef get_abs_corr_cross_section(\n composition, number_of_atoms, take_off_angle, probe_area\n):\ndef edx_cross_section_to_zeta(cross_sections, elements):\ndef zeta_to_edx_cross_section(zfactors, elements):" }, { "identifier": "elements", "path": "exspy/misc/elements.py", "snippet": "" } ]
import warnings import logging import traits.api as t import numpy as np import pint import hyperspy.api as hs from scipy import constants from hyperspy.signal import BaseSetMetadataItems, BaseSignal from hyperspy import utils from hyperspy.docstrings.signal import LAZYSIGNAL_DOC from hyperspy.ui_registry import add_gui_method, DISPLAY_DT, TOOLKIT_DT from hyperspy.misc.utils import isiterable from hyperspy.external.progressbar import progressbar from hyperspy.axes import DataAxis from .eds import EDSSpectrum, LazyEDSSpectrum from exspy._defaults_parser import preferences from exspy.misc import material from exspy.misc.eds import utils as utils_eds from exspy.misc.elements import elements as elements_db from scipy.ndimage import binary_dilation, binary_erosion from exspy.models.edstemmodel import EDSTEMModel
14,559
if take_off_angle == "auto": toa = self.get_take_off_angle() else: toa = take_off_angle # determining illumination area for cross sections quantification. if method == "cross_section": if probe_area == "auto": parameters = self.metadata.Acquisition_instrument.TEM if probe_area in parameters: probe_area = parameters.TEM.probe_area else: probe_area = self.get_probe_area( navigation_axes=self.axes_manager.navigation_axes ) int_stack = utils.stack(intensities, lazy=False, show_progressbar=False) comp_old = np.zeros_like(int_stack.data) abs_corr_factor = None # initial if method == "CL": quantification_method = utils_eds.quantification_cliff_lorimer kwargs = { "intensities": int_stack.data, "kfactors": factors, "absorption_correction": abs_corr_factor, "mask": navigation_mask, } elif method == "zeta": quantification_method = utils_eds.quantification_zeta_factor kwargs = { "intensities": int_stack.data, "zfactors": factors, "dose": self._get_dose(method), "absorption_correction": abs_corr_factor, } elif method == "cross_section": quantification_method = utils_eds.quantification_cross_section kwargs = { "intensities": int_stack.data, "cross_sections": factors, "dose": self._get_dose(method, **kwargs), "absorption_correction": abs_corr_factor, } else: raise ValueError( "Please specify method for quantification, " 'as "CL", "zeta" or "cross_section".' ) while True: results = quantification_method(**kwargs) if method == "CL": composition.data = results * 100.0 if absorption_correction: if thickness is not None: mass_thickness = intensities[0].deepcopy() mass_thickness.data = self.CL_get_mass_thickness( composition.split(), thickness ) mass_thickness.metadata.General.title = "Mass thickness" else: raise ValueError( "Thickness is required for absorption correction " "with k-factor method. Results will contain no " "correction for absorption." ) elif method == "zeta": composition.data = results[0] * 100 mass_thickness = intensities[0].deepcopy() mass_thickness.data = results[1] else: composition.data = results[0] * 100.0 number_of_atoms = composition._deepcopy_with_new_data(results[1]) if method == "cross_section": if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_cross_section( composition.split(), number_of_atoms.split(), toa, probe_area ) kwargs["absorption_correction"] = abs_corr_factor else: if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_zeta( composition.split(), mass_thickness, toa ) kwargs["absorption_correction"] = abs_corr_factor res_max = np.max(composition.data - comp_old) comp_old = composition.data if absorption_correction and show_progressbar: pbar.update(1) it += 1 if not absorption_correction or abs(res_max) < convergence_criterion: break elif it >= max_iterations: raise Exception( "Absorption correction failed as solution " f"did not converge after {max_iterations} " "iterations" ) if method == "cross_section": number_of_atoms = composition._deepcopy_with_new_data(results[1]) number_of_atoms = number_of_atoms.split() composition = composition.split() else: composition = composition.split() # convert ouput units to selection as required. if composition_units == "atomic": if method != "cross_section":
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. _logger = logging.getLogger(__name__) @add_gui_method(toolkey="exspy.microscope_parameters_EDS_TEM") class EDSTEMParametersUI(BaseSetMetadataItems): beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") real_time = t.Float(t.Undefined, label="Real time (s)") tilt_stage = t.Float(t.Undefined, label="Stage tilt (degree)") live_time = t.Float(t.Undefined, label="Live time (s)") probe_area = t.Float(t.Undefined, label="Beam/probe area (nm²)") azimuth_angle = t.Float(t.Undefined, label="Azimuth angle (degree)") elevation_angle = t.Float(t.Undefined, label="Elevation angle (degree)") energy_resolution_MnKa = t.Float(t.Undefined, label="Energy resolution MnKa (eV)") beam_current = t.Float(t.Undefined, label="Beam current (nA)") mapping = { "Acquisition_instrument.TEM.beam_energy": "beam_energy", "Acquisition_instrument.TEM.Stage.tilt_alpha": "tilt_stage", "Acquisition_instrument.TEM.Detector.EDS.live_time": "live_time", "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle": "azimuth_angle", "Acquisition_instrument.TEM.Detector.EDS.elevation_angle": "elevation_angle", "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa": "energy_resolution_MnKa", "Acquisition_instrument.TEM.beam_current": "beam_current", "Acquisition_instrument.TEM.probe_area": "probe_area", "Acquisition_instrument.TEM.Detector.EDS.real_time": "real_time", } class EDSTEMSpectrum(EDSSpectrum): """Signal class for EDS spectra measured in an TEM.""" _signal_type = "EDS_TEM" def __init__(self, *args, **kwards): super().__init__(*args, **kwards) # Attributes defaults if "Acquisition_instrument.TEM.Detector.EDS" not in self.metadata: if "Acquisition_instrument.SEM.Detector.EDS" in self.metadata: self.metadata.set_item( "Acquisition_instrument.TEM", self.metadata.Acquisition_instrument.SEM, ) del self.metadata.Acquisition_instrument.SEM self._set_default_param() def _set_default_param(self): """Set to value to default (defined in preferences)""" mp = self.metadata mp.Signal.signal_type = "EDS_TEM" mp = self.metadata if "Acquisition_instrument.TEM.Stage.tilt_alpha" not in mp: mp.set_item( "Acquisition_instrument.TEM.Stage.tilt_alpha", preferences.EDS.eds_tilt_stage, ) if "Acquisition_instrument.TEM.Detector.EDS.elevation_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", preferences.EDS.eds_detector_elevation, ) if "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS." 
+ "energy_resolution_MnKa", preferences.EDS.eds_mn_ka, ) if "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", preferences.EDS.eds_detector_azimuth, ) def set_microscope_parameters( self, beam_energy=None, live_time=None, tilt_stage=None, azimuth_angle=None, elevation_angle=None, energy_resolution_MnKa=None, beam_current=None, probe_area=None, real_time=None, display=True, toolkit=None, ): if set( [ beam_energy, live_time, tilt_stage, azimuth_angle, elevation_angle, energy_resolution_MnKa, beam_current, probe_area, real_time, ] ) == {None}: tem_par = EDSTEMParametersUI(self) return tem_par.gui(display=display, toolkit=toolkit) md = self.metadata if beam_energy is not None: md.set_item("Acquisition_instrument.TEM.beam_energy ", beam_energy) if live_time is not None: md.set_item("Acquisition_instrument.TEM.Detector.EDS.live_time", live_time) if tilt_stage is not None: md.set_item("Acquisition_instrument.TEM.Stage.tilt_alpha", tilt_stage) if azimuth_angle is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", azimuth_angle ) if elevation_angle is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", elevation_angle, ) if energy_resolution_MnKa is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS." + "energy_resolution_MnKa", energy_resolution_MnKa, ) if beam_current is not None: md.set_item("Acquisition_instrument.TEM.beam_current", beam_current) if probe_area is not None: md.set_item("Acquisition_instrument.TEM.probe_area", probe_area) if real_time is not None: md.set_item("Acquisition_instrument.TEM.Detector.EDS.real_time", real_time) set_microscope_parameters.__doc__ = """ Set the microscope parameters. If no arguments are given, raises an interactive mode to fill the values. Parameters ---------- beam_energy: float The energy of the electron beam in keV live_time : float In seconds tilt_stage : float In degree azimuth_angle : float In degree elevation_angle : float In degree energy_resolution_MnKa : float In eV beam_current: float In nA probe_area: float In nm² real_time: float In seconds {} {} Examples -------- >>> s = exspy.data.EDS_TEM_FePt_nanoparticles() >>> print(s.metadata.Acquisition_instrument. >>> TEM.Detector.EDS.energy_resolution_MnKa) >>> s.set_microscope_parameters(energy_resolution_MnKa=135.) >>> print(s.metadata.Acquisition_instrument. >>> TEM.Detector.EDS.energy_resolution_MnKa) 133.312296 135.0 """.format( DISPLAY_DT, TOOLKIT_DT ) def _are_microscope_parameters_missing(self): """Check if the EDS parameters necessary for quantification are defined in metadata.""" must_exist = ( "Acquisition_instrument.TEM.beam_energy", "Acquisition_instrument.TEM.Detector.EDS.live_time", ) missing_parameters = [] for item in must_exist: exists = self.metadata.has_item(item) if exists is False: missing_parameters.append(item) if missing_parameters: _logger.info("Missing parameters {}".format(missing_parameters)) return True else: return False def get_calibration_from(self, ref, nb_pix=1): """Copy the calibration and all metadata of a reference. Primary use: To add a calibration to ripple file from INCA software Parameters ---------- ref : signal The reference contains the calibration in its metadata nb_pix : int The live time (real time corrected from the "dead time") is divided by the number of pixel (spectrums), giving an average live time. Raises ------ NotImplementedError If the signal axis is a non-uniform axis. 
Examples -------- >>> ref = exspy.data.EDS_TEM_FePt_nanoparticles() >>> s = exspy.data.EDS_TEM_FePt_nanoparticles(ref.data) >>> print(s.axes_manager[0].scale) >>> s.get_calibration_from(ref) >>> print(s.axes_manager[0].scale) 1.0 0.020028 """ self._original_metadata = ref.original_metadata.deepcopy() # Setup the axes_manager ax_m = self.axes_manager.signal_axes[0] ax_ref = ref.axes_manager.signal_axes[0] for _axis in [ax_m, ax_ref]: if not _axis.is_uniform: raise NotImplementedError( "The function is not implemented for non-uniform axes." ) ax_m.scale = ax_ref.scale ax_m.units = ax_ref.units ax_m.offset = ax_ref.offset # Setup metadata if "Acquisition_instrument.TEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.TEM elif "Acquisition_instrument.SEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.SEM else: raise ValueError( "The reference has no metadata " "'Acquisition_instrument.TEM '" "or 'metadata.Acquisition_instrument.SEM'." ) mp = self.metadata mp.Acquisition_instrument.TEM = mp_ref.deepcopy() if mp_ref.has_item("Detector.EDS.live_time"): mp.Acquisition_instrument.TEM.Detector.EDS.live_time = ( mp_ref.Detector.EDS.live_time / nb_pix ) def quantification( self, intensities, method, factors, composition_units="atomic", absorption_correction=False, take_off_angle="auto", thickness="auto", convergence_criterion=0.5, navigation_mask=1.0, closing=True, plot_result=False, probe_area="auto", max_iterations=30, show_progressbar=None, **kwargs, ): """ Absorption corrected quantification using Cliff-Lorimer, the zeta-factor method, or ionization cross sections. The function iterates through quantification function until two successive interations don't change the final composition by a defined percentage critera (0.5% by default). Parameters ---------- intensities: list of signal the intensitiy for each X-ray lines. method: {'CL', 'zeta', 'cross_section'} Set the quantification method: Cliff-Lorimer, zeta-factor, or ionization cross sections. factors: list of float The list of kfactors, zeta-factors or cross sections in same order as intensities. Note that intensities provided by Hyperspy are sorted by the alphabetical order of the X-ray lines. eg. factors =[0.982, 1.32, 1.60] for ['Al_Ka', 'Cr_Ka', 'Ni_Ka']. composition_units: {'atomic', 'weight'} The quantification returns the composition in 'atomic' percent by default, but can also return weight percent if specified. absorption_correction: bool Specify whether or not an absorption correction should be applied. 'False' by default so absorption will not be applied unless specfied. take_off_angle : {'auto'} The angle between the sample surface and the vector along which X-rays travel to reach the centre of the detector. thickness: {'auto'} thickness in nm (can be a single value or have the same navigation dimension as the signal). NB: Must be specified for 'CL' method. For 'zeta' or 'cross_section' methods, first quantification step provides a mass_thickness internally during quantification. convergence_criterion: The convergence criterium defined as the percentage difference between 2 successive iterations. 0.5% by default. navigation_mask : None or float or signal The navigation locations marked as True are not used in the quantification. If float is given the vacuum_mask method is used to generate a mask with the float value as threhsold. Else provides a signal with the navigation shape. Only for the 'Cliff-Lorimer' method. 
closing: bool If true, applied a morphologic closing to the mask obtained by vacuum_mask. plot_result : bool If True, plot the calculated composition. If the current object is a single spectrum it prints the result instead. probe_area = {'auto'} This allows the user to specify the probe_area for interaction with the sample needed specifically for the cross_section method of quantification. When left as 'auto' the pixel area is used, calculated from the navigation axes information. max_iterations : int An upper limit to the number of calculations for absorption correction. kwargs The extra keyword arguments are passed to plot. Returns ------- A list of quantified elemental maps (signal) giving the composition of the sample in weight or atomic percent with absorption correciton taken into account based on the sample thickness estimate provided. If the method is 'zeta' this function also returns the mass thickness profile for the data. If the method is 'cross_section' this function also returns the atom counts for each element. Examples -------- >>> s = exspy.data.EDS_TEM_FePt_nanoparticles() >>> s.add_lines() >>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0]) >>> s.plot(background_windows=bw) >>> intensities = s.get_lines_intensity(background_windows=bw) >>> res = s.quantification(intensities, kfactors, plot_result=True, >>> composition_units='atomic') Fe (Fe_Ka): Composition = 15.41 atomic percent Pt (Pt_La): Composition = 84.59 atomic percent See also -------- vacuum_mask """ if not isinstance(intensities, (list, tuple)) or not isinstance( intensities[0], BaseSignal ): raise ValueError("The parameter `intensities` must be a list of signals.") elif len(intensities) <= 1: raise ValueError("Several X-ray line intensities are required.") if isinstance(navigation_mask, float): if self.axes_manager.navigation_dimension > 0: navigation_mask = self.vacuum_mask(navigation_mask, closing) else: navigation_mask = None xray_lines = [ intensity.metadata.Sample.xray_lines[0] for intensity in intensities ] it = 0 if absorption_correction: if show_progressbar is None: # pragma: no cover show_progressbar = hs.preferences.General.show_progressbar if show_progressbar: pbar = progressbar(total=None, desc="Absorption correction calculation") composition = utils.stack(intensities, lazy=False, show_progressbar=False) if take_off_angle == "auto": toa = self.get_take_off_angle() else: toa = take_off_angle # determining illumination area for cross sections quantification. 
if method == "cross_section": if probe_area == "auto": parameters = self.metadata.Acquisition_instrument.TEM if probe_area in parameters: probe_area = parameters.TEM.probe_area else: probe_area = self.get_probe_area( navigation_axes=self.axes_manager.navigation_axes ) int_stack = utils.stack(intensities, lazy=False, show_progressbar=False) comp_old = np.zeros_like(int_stack.data) abs_corr_factor = None # initial if method == "CL": quantification_method = utils_eds.quantification_cliff_lorimer kwargs = { "intensities": int_stack.data, "kfactors": factors, "absorption_correction": abs_corr_factor, "mask": navigation_mask, } elif method == "zeta": quantification_method = utils_eds.quantification_zeta_factor kwargs = { "intensities": int_stack.data, "zfactors": factors, "dose": self._get_dose(method), "absorption_correction": abs_corr_factor, } elif method == "cross_section": quantification_method = utils_eds.quantification_cross_section kwargs = { "intensities": int_stack.data, "cross_sections": factors, "dose": self._get_dose(method, **kwargs), "absorption_correction": abs_corr_factor, } else: raise ValueError( "Please specify method for quantification, " 'as "CL", "zeta" or "cross_section".' ) while True: results = quantification_method(**kwargs) if method == "CL": composition.data = results * 100.0 if absorption_correction: if thickness is not None: mass_thickness = intensities[0].deepcopy() mass_thickness.data = self.CL_get_mass_thickness( composition.split(), thickness ) mass_thickness.metadata.General.title = "Mass thickness" else: raise ValueError( "Thickness is required for absorption correction " "with k-factor method. Results will contain no " "correction for absorption." ) elif method == "zeta": composition.data = results[0] * 100 mass_thickness = intensities[0].deepcopy() mass_thickness.data = results[1] else: composition.data = results[0] * 100.0 number_of_atoms = composition._deepcopy_with_new_data(results[1]) if method == "cross_section": if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_cross_section( composition.split(), number_of_atoms.split(), toa, probe_area ) kwargs["absorption_correction"] = abs_corr_factor else: if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_zeta( composition.split(), mass_thickness, toa ) kwargs["absorption_correction"] = abs_corr_factor res_max = np.max(composition.data - comp_old) comp_old = composition.data if absorption_correction and show_progressbar: pbar.update(1) it += 1 if not absorption_correction or abs(res_max) < convergence_criterion: break elif it >= max_iterations: raise Exception( "Absorption correction failed as solution " f"did not converge after {max_iterations} " "iterations" ) if method == "cross_section": number_of_atoms = composition._deepcopy_with_new_data(results[1]) number_of_atoms = number_of_atoms.split() composition = composition.split() else: composition = composition.split() # convert ouput units to selection as required. if composition_units == "atomic": if method != "cross_section":
composition = material.weight_to_atomic(composition)
3
2023-10-28 20:04:10+00:00
24k
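The quantification docstring quoted in the row above describes the Cliff-Lorimer route: each X-ray line intensity is scaled by its k-factor and the result is normalised, and when absorption correction is enabled the calculation is repeated until two successive compositions differ by less than the convergence criterion. A minimal standalone sketch of that ratio step follows; it is not code from the dataset — the intensity values and function name are illustrative assumptions, and only the k-factors echo the Fe_Ka/Pt_La example given in the docstring.

import numpy as np

def cliff_lorimer_composition(intensities, kfactors):
    """Single Cliff-Lorimer step: C_i is proportional to k_i * I_i, normalised to 100 %.

    `intensities` and `kfactors` are 1D sequences in the same order
    (alphabetical X-ray line order, as the docstring above notes).
    """
    weights = np.asarray(kfactors, dtype=float) * np.asarray(intensities, dtype=float)
    return 100.0 * weights / weights.sum()

# Illustrative Fe_Ka / Pt_La intensities; the k-factors echo the docstring example.
composition = cliff_lorimer_composition([52000.0, 12000.0], [1.450226, 5.075602])
print(composition)  # approximately [55.3, 44.7] weight percent, summing to 100

# With absorption_correction=True, the method in the row above repeats this step,
# rescaling the intensities by an estimated absorption factor on each pass, until
# the largest change between two successive compositions falls below
# convergence_criterion (0.5 percentage points by default) or max_iterations is hit.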
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n def avg(self):\n def global_avg(self):\n def max(self):\n def value(self):\n def __str__(self):\n def __init__(self, delimiter=\"\\t\"):\n def update(self, **kwargs):\n def __getattr__(self, attr):\n def __str__(self):\n def global_avg(self):\n def synchronize_between_processes(self):\n def add_meter(self, name, meter):\n def log_every(self, iterable, print_freq, header=None):\n def __init__(self, *args, **kwargs):\ndef compute_acc(logits, label, reduction='mean'):\ndef compute_n_params(model, return_str=True):\ndef setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef seed_worker(worker_id):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef save_on_master(*args, **kwargs):\ndef init_distributed_mode(args):\n MB = 1024.0 * 1024.0" }, { "identifier": "create_scheduler", "path": "scheduler/scheduler_factory.py", "snippet": "def create_scheduler(args, optimizer):\n num_epochs = args.epochs\n\n if getattr(args, 'lr_noise', None) is not None:\n lr_noise = getattr(args, 'lr_noise')\n if isinstance(lr_noise, (list, tuple)):\n noise_range = [n * num_epochs for n in lr_noise]\n if len(noise_range) == 1:\n noise_range = noise_range[0]\n else:\n noise_range = lr_noise * num_epochs\n else:\n noise_range = None\n\n lr_scheduler = None\n if args.sched == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'tanh':\n lr_scheduler = TanhLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=args.decay_epochs,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n elif args.sched == 'plateau':\n mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'\n lr_scheduler = PlateauLRScheduler(\n optimizer,\n decay_rate=args.decay_rate,\n patience_t=args.patience_epochs,\n lr_min=args.min_lr,\n mode=mode,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cooldown_t=0,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n 
noise_seed=getattr(args, 'seed', 42),\n )\n\n return lr_scheduler, num_epochs" }, { "identifier": "create_optimizer", "path": "optim/optim_factory.py", "snippet": "def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip)\n weight_decay = 0.\n else:\n parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())]\n #model.parameters()\n\n # print(parameters)\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n if hasattr(args, 'opt_args') and args.opt_args is not None:\n opt_args.update(args.opt_args)\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp': \n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp': \n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'novograd':\n optimizer = NovoGrad(parameters, **opt_args)\n elif opt_lower == 'nvnovograd':\n optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, 
**opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "train", "path": "engine/train.py", "snippet": "def train(model, image_encoder, text_encoder, tokenizer, data_loader, optimizer, epoch, warmup_steps, device, scheduler, args, config, writer):\n clip_loss = ClipLoss()\n ce_loss = nn.CrossEntropyLoss(ignore_index=-1)\n \n if args.add_dataset:\n ASL_loss = AsymmetricLossAdd(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n else:\n ASL_loss = AsymmetricLoss(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n\n loss_m = AverageMeter()\n loss_clip_m = AverageMeter()\n loss_ce_m = AverageMeter()\n loss_ce_image_m = AverageMeter()\n loss_ce_text_m = AverageMeter()\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n\n model.train() \n image_encoder.train() \n text_encoder.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce_image', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n if args.use_entity_features:\n metric_logger.add_meter('loss_ce_text', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_clip', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.update(loss=1.0)\n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n header = 'Train Epoch: [{}]'.format(epoch)\n print_freq = 50 \n step_size = 100\n warmup_iterations = warmup_steps*step_size \n scalar_step = epoch*len(data_loader)\n num_batches_per_epoch = data_loader.num_batches\n sample_digits = math.ceil(math.log(data_loader.num_samples + 1, 10))\n\n for i, sample in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n if args.fourier:\n image = fourier_aug(sample['image'].to(device))\n else:\n image = sample['image'].to(device) \n label = sample['label'].long().to(device)\n\n if args.ignore_index:\n pass\n else:\n label[label==-1]=0\n entity = sample['entity']\n\n if args.add_dataset:\n dataset_label = sample['label_dataset']\n\n data_time_m.update(time.time() - end)\n\n optimizer.zero_grad()\n\n if args.add_dataset:\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 'Tuberculosis',\n 'Other diseases']\n\n else:\n\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged 
cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration']\n \n \n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n entity_features = get_text_features(text_encoder,entity,tokenizer,device,max_length=args.max_length)\n\n image_features,image_features_pool = image_encoder(image)\n if args.add_dataset:\n pred_class_image, moe_img = model(image_features,text_features,args)\n else:\n pred_class_image = model(image_features,text_features)\n\n\n if args.bce or args.asl:\n label = label.float()\n\n label_mask = (label != -1).squeeze()\n\n\n\n if args.add_dataset:\n loss_moe_img = moe_cl_loss(moe_img, dataset_label)\n\n if args.asl:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label_image.view(-1,1))\n elif args.bce:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = F.binary_cross_entropy(pred_class_image.view(-1,1),label_image.view(-1,1))\n else:\n if args.asl:\n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_image = F.binary_cross_entropy_with_logits(pred_class_image.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_image = ce_loss(pred_class_image.view(-1,2),label.view(-1)) \n\n if args.use_entity_features:\n if args.add_dataset:\n pred_class_text, moe_txt = model(entity_features.unsqueeze(1),text_features,args)\n loss_moe_txt = moe_cl_loss(moe_txt, dataset_label)\n else:\n pred_class_text = model(entity_features.unsqueeze(1),text_features)\n\n if args.add_dataset:\n if args.asl:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label_text.view(-1,1))\n \n elif args.bce:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = F.binary_cross_entropy(pred_class_text.view(-1,1),label_text.view(-1,1))\n\n else:\n if args.asl:\n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_text = F.binary_cross_entropy_with_logits(pred_class_text.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_text = ce_loss(pred_class_text.view(-1,2),label.view(-1))\n\n loss_ce = loss_ce_image + loss_ce_text\n if args.add_dataset:\n loss_moe = loss_moe_img + loss_moe_txt\n\n else:\n loss_ce = loss_ce_image\n if args.add_dataset:\n loss_moe = loss_moe_img\n\n\n loss_clip = clip_loss(image_features_pool,entity_features)\n if args.add_dataset:\n loss = loss_ce + loss_clip * args.loss_ratio + args.moe_ratio * loss_moe\n else:\n loss = loss_ce + loss_clip * args.loss_ratio\n \n\n loss.backward()\n optimizer.step() \n \n writer.add_scalar('loss/loss', loss, scalar_step)\n writer.add_scalar('loss/loss_ce', loss_ce, scalar_step)\n writer.add_scalar('loss/loss_ce_image', loss_ce_image, scalar_step)\n if args.use_entity_features:\n writer.add_scalar('loss/loss_ce_text', loss_ce_text, scalar_step)\n writer.add_scalar('loss/loss_clip', loss_clip, scalar_step)\n scalar_step += 1\n\n metric_logger.update(loss=loss.item())\n metric_logger.update(loss_ce=loss_ce.item())\n 
metric_logger.update(loss_ce_image=loss_ce_image.item())\n if args.use_entity_features:\n metric_logger.update(loss_ce_text=loss_ce_text.item())\n metric_logger.update(loss_clip=loss_clip.item())\n\n\n if epoch==0 and i%step_size==0 and i<=warmup_iterations: \n scheduler.step(i//step_size) \n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i + 1\n if i % 100 == 0:\n batch_size = len(image)\n num_samples = batch_count * batch_size\n samples_per_epoch = data_loader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n loss_m.update(loss.item(), batch_size)\n loss_clip_m.update(loss_clip.item(), batch_size)\n loss_ce_m.update(loss_ce.item(), batch_size)\n loss_ce_image_m.update(loss_ce_image.item(), batch_size)\n if args.use_entity_features:\n loss_ce_text_m.update(loss_ce_text.item(), batch_size)\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Loss_ce_text: {loss_ce_text_m.val:#.5g} ({loss_ce_text_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n else:\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger.global_avg()) \n return {k: \"{:.6f}\".format(meter.global_avg) for k, meter in metric_logger.meters.items()} #,loss_epoch.mean()" }, { "identifier": "valid_on_cheXpert", "path": "engine/train.py", "snippet": "def valid_on_cheXpert(model,image_encoder,text_encoder,tokenizer,data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = ['atelectasis', 'cardiomegaly', 'consolidation', 'edema', 'pleural effusion']\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n # initialize the ground truth and output tensor\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n \n # \n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n 
val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n \n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class=5)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "valid_on_chestxray14", "path": "engine/train.py", "snippet": "def valid_on_chestxray14(model, image_encoder, text_encoder, tokenizer, data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = [\"atelectasis\",\"cardiomegaly\",\"pleural effusion\",\"infiltration\",\"lung mass\",\"lung nodule\",\"pneumonia\",\"pneumothorax\",\"consolidation\",\"edema\",\"emphysema\",\"fibrosis\",\"pleural thicken\",\"hernia\"]\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n\n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n\n\n\n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class = 14)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "CLP_clinical", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n self.mlp_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n nn.GELU(),\n nn.Linear(embed_dim, embed_dim)\n )\n self.embed_dim = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.init_parameters()\n \n def 
init_parameters(self):\n nn.init.constant_(self.logit_scale, np.log(1 / 0.07))\n for m in self.mlp_embed:\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=self.embed_dim ** -0.5)\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n config = BertConfig.from_pretrained(bert_model_name, output_hidden_states=True)#bert-base-uncased\n model = AutoModel.from_pretrained(bert_model_name, config=config)#, return_dict=True)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n #input batch_size,token, return batch_size,dim \n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n last_hidden_state, pooler_output, hidden_states = output[0],output[1],output[2]\n encode_out = self.mlp_embed(pooler_output)\n # encode_out = pooler_output\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "ModelRes", "path": "models/clip_tqn.py", "snippet": "class ModelRes(nn.Module):\n def __init__(self, res_base_model):\n super(ModelRes, self).__init__()\n self.resnet_dict = {\"resnet50\": models.resnet50(pretrained=True)}\n self.resnet = self._get_res_basemodel(res_base_model)\n\n num_ftrs = int(self.resnet.fc.in_features)\n self.res_features = nn.Sequential(*list(self.resnet.children())[:-2])\n\n self.res_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.res_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_res_basemodel(self, res_model_name):\n try:\n res_model = self.resnet_dict[res_model_name]\n print(\"Image feature extractor:\", res_model_name)\n return res_model\n except:\n raise (\"Invalid model name. 
Check the config file and pass one of: resnet18 or resnet50\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n res_fea = self.res_features(img)\n\n res_fea = rearrange(res_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(res_fea,'b n d -> (b n) d')\n x = self.res_l1(h)\n x = F.relu(x)\n x = self.res_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "TQN_Model", "path": "models/clip_tqn.py", "snippet": "class TQN_Model(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n out = self.mlp_head(features) #(batch_size, query_num)\n return out" }, { "identifier": "TQN_Model_Add", "path": "models/clip_tqn.py", "snippet": "class TQN_Model_Add(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n gate_num: int = 3,\n high_dim: int = 32,\n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.decoderV1_1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1_2 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n 
return_intermediate=False)\n self.decoderV1_3 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n\n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_1 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_2 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_3 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n ) \n \n self.gate_head = nn.Sequential(\n nn.Linear(embed_dim, gate_num)\n )\n self.cl_head = nn.Sequential(\n nn.Linear(gate_num, high_dim)\n )\n\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features, args):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n gate_weight = self.gate_head(image_features_pool.squeeze(0)) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n \n \n if args.finetune:\n features_1 = self.decoderV1_1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_1 = self.dropout_feas(features_1).transpose(0,1) \n features_2 = self.decoderV1_2(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_2 = self.dropout_feas(features_2).transpose(0,1) \n features_3 = self.decoderV1_3(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_3 = self.dropout_feas(features_3).transpose(0,1) \n \n out_1 = torch.sigmoid(self.mlp_head_1(features_1))\n out_2 = torch.sigmoid(self.mlp_head_2(features_2))\n out_3 = torch.sigmoid(self.mlp_head_3(features_3))\n\n\n out = self.mlp_head(features)\n \n gate_weight = torch.softmax(gate_weight, dim=1)\n out = torch.sigmoid(out)\n\n high_dimension = self.cl_head(gate_weight)\n out_bias = gate_weight[:,0].unsqueeze(1).unsqueeze(2) * out_1 + gate_weight[:,1].unsqueeze(1).unsqueeze(2) * out_2 + gate_weight[:,2].unsqueeze(1).unsqueeze(2) * out_3\n\n out = args.main_ratio * out + args.bias_ratio * out_bias\n\n return out, high_dimension" }, { "identifier": "ModelDense", "path": "models/clip_tqn.py", "snippet": "class ModelDense(nn.Module):\n def __init__(self, dense_base_model):\n super(ModelDense, self).__init__()\n \n self.densenet_dict = {\"densenet121\": models.densenet121(pretrained=True)}#,\n # \"densenet161\": models.densenet161(pretrained=True)}\n self.densenet = self._get_dense_basemodel(dense_base_model)\n num_ftrs = int(self.densenet.classifier.in_features)\n 
self.dense_features = self.densenet.features\n self.dense_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.dense_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_dense_basemodel(self, dense_base_model):\n try:\n dense_model = self.densenet_dict[dense_base_model]\n print(\"Image feature extractor:\", dense_base_model)\n return dense_model\n except:\n raise (\"Invalid model name. Check the config file and pass one of: densenet121 or densenet161\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n dense_fea = self.dense_features(img)#N, 1024, 7,7\n dense_fea = rearrange(dense_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(dense_fea,'b n d -> (b n) d')\n x = self.dense_l1(h)\n x = F.relu(x)\n x = self.dense_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "CLP_clinical2", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical2(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n model = AutoModel.from_pretrained(bert_model_name)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n encode_out = output.last_hidden_state[:,0,:]\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "BertTokenizer", "path": "models/tokenization_bert.py", "snippet": "class BertTokenizer(PreTrainedTokenizer):\n r\"\"\"\n Construct a BERT tokenizer. Based on WordPiece.\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.\n Users should refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to lowercase the input when tokenizing.\n do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to do basic tokenization before WordPiece.\n never_split (:obj:`Iterable`, `optional`):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n :obj:`do_basic_tokenize=True`\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (:obj:`str`, `optional`, defaults to :obj:`\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (:obj:`str`, `optional`, defaults to :obj:`\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (:obj:`str`, `optional`, defaults to :obj:`\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to tokenize Chinese characters.\n This should likely be deactivated for Japanese (see this `issue\n <https://github.com/huggingface/transformers/issues/328>`__).\n strip_accents: (:obj:`bool`, `optional`):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for :obj:`lowercase` (as in the original BERT).\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(\n self,\n vocab_file,\n do_lower_case=True,\n do_basic_tokenize=True,\n never_split=None,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n do_lower_case=do_lower_case,\n do_basic_tokenize=do_basic_tokenize,\n never_split=never_split,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\n \"Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained \"\n \"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(vocab_file)\n )\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(\n do_lower_case=do_lower_case,\n never_split=never_split,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n )\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)\n\n @property\n def do_lower_case(self):\n return self.basic_tokenizer.do_lower_case\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n def get_vocab(self):\n return dict(self.vocab, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n split_tokens = []\n if self.do_basic_tokenize:\n for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):\n\n # If the token is part of the never_split set\n if token in self.basic_tokenizer.never_split:\n split_tokens.append(token)\n else:\n split_tokens += self.wordpiece_tokenizer.tokenize(token)\n else:\n split_tokens = self.wordpiece_tokenizer.tokenize(text)\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n - single sequence: ``[CLS] X ``\n - pair of sequences: ``[CLS] A [SEP] B [SEP]``\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` method.\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the token list is already formatted with special tokens for the model.\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\n \"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formatted with special tokens for the model.\"\n )\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is not None:\n return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1]\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence\n pair mask has the following format:\n ::\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given\n sequence(s).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n index = 0\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n else:\n vocab_file = (filename_prefix + \"-\" if filename_prefix else \"\") + save_directory\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(vocab_file)\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" }, { "identifier": "MIMIC_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class MIMIC_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,1:])#40 class for fine-grained query list\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), 
interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Mergetrain_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Mergetrain_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,2:])#60 class for fine-grained query list\n self.label_dataset_list = np.asarray(data_info.iloc[:,1])\n\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n 
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n\n if self.label_dataset_list[index] == 0:\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n # img = Image.open(img_path).convert('RGB') \n # image = self.transform(img)\n # return {\n # \"image\": image,\n # \"label\": class_label,\n # \"caption\": caption_list,\n # \"entity\": entity_details\n # }\n \n else:\n img_path = self.img_path_list[index]\n class_label = self.class_list[index] \n caption_list = ''\n head = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 
'Tuberculosis',\n 'Other diseases']\n index_positive = np.where(class_label == 1)\n entity = np.array(head)[index_positive]\n entity_details = ''\n for sub_entity in entity:\n entity_details = entity_details + sub_entity + ' [SEP] '\n\n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n label_dataset = self.label_dataset_list[index]\n\n return {\n \"image\": image,\n \"label\": class_label,\n \"label_dataset\": label_dataset,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Chestxray14_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Chestxray14_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,3:])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize(image_res, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ])\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace('/mnt/petrelfs/zhangxiaoman/DATA/Chestxray/ChestXray8/','/remote-home/share/medical/public/ChestXray8/')\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" }, { "identifier": "CheXpert_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class CheXpert_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,[13,7,11,10,15]])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize([image_res,image_res], interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n def __getitem__(self, index):\n img_path = os.path.join('/remote-home/share/tianjiedai/',self.img_path_list[index])\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" } ]
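For orientation, the retrieved snippets above already describe a small embedding API: ModelDense.forward returns per-patch and pooled image features, CLP_clinical2.encode_text returns the [CLS] embedding of a tokenized report, and BertTokenizer produces the input_ids/attention_mask dict that encode_text expects. The sketch below wires these pieces together on dummy inputs; it assumes the repository's models package is importable, runs on CPU, and the cosine-similarity step at the end is purely illustrative, not something this record specifies.

import torch
import torch.nn.functional as F
from models.clip_tqn import ModelDense, CLP_clinical2        # repo modules, assumed importable
from models.tokenization_bert import BertTokenizer

# Instantiate the encoders the same way the cropped code and gold next line below do, minus .cuda().
image_encoder = ModelDense(dense_base_model='densenet121')   # DenseNet-121 backbone with a 768-d projection
text_encoder = CLP_clinical2(bert_model_name='emilyalsentzer/Bio_ClinicalBERT')
tokenizer = BertTokenizer.from_pretrained('emilyalsentzer/Bio_ClinicalBERT')

images = torch.randn(2, 3, 224, 224)                         # dummy chest X-ray batch
reports = ['pleural effusion [SEP]', 'no finding [SEP]']     # dummy entity strings
tokens = tokenizer(reports, padding=True, truncation=True, max_length=128, return_tensors='pt')

with torch.no_grad():
    patch_emb, image_emb = image_encoder(images)             # (B, 49, 768) and (B, 768) for 224x224 inputs
    text_emb = text_encoder.encode_text(tokens)              # (B, 768) [CLS] embeddings

# Illustrative image-text cosine-similarity matrix.
sim = F.normalize(image_emb, dim=-1) @ F.normalize(text_emb, dim=-1).t()
print(sim.shape)                                             # torch.Size([2, 2])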
import argparse
import os
import logging
import yaml
import numpy as np
import random
import time
import datetime
import json
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import socket
from pathlib import Path
from functools import partial
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from transformers import AutoModel,BertConfig,AutoTokenizer
from factory import utils
from scheduler import create_scheduler
from optim import create_optimizer
from engine.train import train,valid_on_cheXpert,valid_on_chestxray14
from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2
from models.tokenization_bert import BertTokenizer
from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset
from io import BytesIO
17,048
# import ruamel.yaml as yaml

def main(args, config):
    torch.cuda.current_device()
    torch.cuda._initialized = True
    print("Total CUDA devices: ", torch.cuda.device_count())
    torch.set_default_tensor_type('torch.FloatTensor')

    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    print('sampler_rank',sampler_rank,'num_tasks',num_tasks)

    #### Dataset ####
    print("Creating dataset")
    if args.add_dataset == True:
        train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
    else:
        train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=train_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    train_dataloader.num_samples = len(train_dataset)
    train_dataloader.num_batches = len(train_dataloader)

    val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res'])
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=val_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    val_dataloader.num_samples = len(val_dataset)
    val_dataloader.num_batches = len(val_dataloader)

    test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res'])
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=test_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader.num_samples = len(test_dataset)
    test_dataloader.num_batches = len(test_dataloader)

    test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res'])
    test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader_chexpert = DataLoader(
        test_dataset_chexpert,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        sampler=test_sampler_chexpert,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader_chexpert.num_samples = len(test_dataset_chexpert)
    test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert)

    if args.image_encoder_name == 'resnet':
        image_encoder = ModelRes(res_base_model='resnet50').cuda()
    elif args.image_encoder_name == 'dense':
        image_encoder = ModelDense(dense_base_model = 'densenet121').cuda()

    if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT':
        tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
# import ruamel.yaml as yaml

def main(args, config):
    torch.cuda.current_device()
    torch.cuda._initialized = True
    print("Total CUDA devices: ", torch.cuda.device_count())
    torch.set_default_tensor_type('torch.FloatTensor')

    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    print('sampler_rank',sampler_rank,'num_tasks',num_tasks)

    #### Dataset ####
    print("Creating dataset")
    if args.add_dataset == True:
        train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
    else:
        train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=train_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    train_dataloader.num_samples = len(train_dataset)
    train_dataloader.num_batches = len(train_dataloader)

    val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res'])
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=val_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    val_dataloader.num_samples = len(val_dataset)
    val_dataloader.num_batches = len(val_dataloader)

    test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res'])
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=test_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader.num_samples = len(test_dataset)
    test_dataloader.num_batches = len(test_dataloader)

    test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res'])
    test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader_chexpert = DataLoader(
        test_dataset_chexpert,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        sampler=test_sampler_chexpert,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader_chexpert.num_samples = len(test_dataset_chexpert)
    test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert)

    if args.image_encoder_name == 'resnet':
        image_encoder = ModelRes(res_base_model='resnet50').cuda()
    elif args.image_encoder_name == 'dense':
        image_encoder = ModelDense(dense_base_model = 'densenet121').cuda()

    if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT':
        tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
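The training code above passes utils.seed_worker as each DataLoader's worker_init_fn, but that helper is not among the retrieved snippets. A typical implementation, shown here purely as an assumption about what factory.utils.seed_worker might look like, follows the standard PyTorch recipe of deriving each worker's numpy/random seed from torch's base seed:

import random
import numpy as np
import torch

def seed_worker(worker_id):
    # Hypothetical stand-in for factory.utils.seed_worker (not shown in this record).
    # Each DataLoader worker derives its own seed from torch's base seed so that
    # numpy/random-based augmentations are reproducible yet differ across workers.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)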
text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda()
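The next_line value above is the ground-truth completion that immediately follows the cropped code. The record does not prescribe an evaluation metric; one common convention for next-line completion benchmarks is exact match plus character-level edit similarity, sketched below (the function name and the metric choice are assumptions, not part of the dataset):

import difflib

def score_next_line(predicted: str, gold: str) -> dict:
    # Strip surrounding whitespace so indentation-only differences do not
    # count against an otherwise correct completion.
    pred, gold = predicted.strip(), gold.strip()
    return {
        'exact_match': float(pred == gold),
        'edit_similarity': difflib.SequenceMatcher(None, pred, gold).ratio(),
    }

gold = "text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda()"
pred = "text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda()"  # misses the gold snippet's class
print(score_next_line(pred, gold))  # exact_match 0.0, edit_similarity roughly 0.99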
11
2023-10-30 00:24:16+00:00
24k
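Taken together, the fields of this record (the retrieved context snippets, the cropped in-file prefix, the gold next line, and gold_snippet_index) are enough to drive a retrieval-augmented next-line completion setup. The sketch below shows one plausible way to consume such a record; the JSONL file name, the prompt layout, and the helper name are assumptions, and only the field names mirror the record structure shown above.

import json

def build_prompt(record: dict, max_snippets: int = 3) -> str:
    # Assumed layout: 'context' is a list of {'identifier', 'path', 'snippet'} dicts,
    # 'cropped_code' is the in-file prefix, and 'gold_snippet_index' points at the
    # context entry needed to predict 'next_line'.
    gold_idx = record['gold_snippet_index']
    ordered = [record['context'][gold_idx]] + [
        c for i, c in enumerate(record['context']) if i != gold_idx
    ][:max_snippets - 1]
    parts = [f"# From {c['path']} ({c['identifier']})\n{c['snippet']}" for c in ordered]
    parts.append(record['cropped_code'])
    return '\n\n'.join(parts)  # a model is then asked to continue with record['next_line']

# Hypothetical file holding one JSON record per line.
with open('completion_samples.jsonl') as f:
    record = json.loads(f.readline())
    prompt = build_prompt(record)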
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ECCommitment", "path": "storage/shared/ecc.py", "snippet": "class ECCommitment:\n \"\"\"\n Elliptic Curve based commitment scheme allowing one to commit to a chosen value while keeping it hidden to others.\n\n Attributes:\n g (ECC.EccPoint): The base point of the elliptic curve used as part of the commitment.\n h (ECC.EccPoint): Another random point on the elliptic curve used as part of the commitment.\n\n Methods:\n commit(m): Accepts a message, hashes it, and produces a commitment to the hashed message.\n open(c, m_val, r): Accepts a commitment, a hashed message, and a random value to verify the commitment.\n\n The `commit` method will print the commitment process, and the `open` method will print the verification process.\n \"\"\"\n\n def __init__(self, g, h, verbose=False):\n self.g = g # Base point of the curve\n self.h = h # Another random point on the curve\n self.verbose = verbose\n\n def commit(self, m): # AKA Seal.\n \"\"\"\n Create a cryptographic commitment to a message.\n\n The message is hashed, and the hash is used along with a random number to form the commitment\n using the public parameters g and h. 
The commitment can be verified with the `open` method.\n\n Parameters:\n - m (bytes | bytearray | object): The message to commit to.\n\n Returns:\n - tuple: A 3-tuple (commitment, hashed message value, random number used in the commitment).\n\n Side Effects:\n - This method will print the commitment details to the console.\n\n Raises:\n - Exception: If the commitment calculation fails.\n \"\"\"\n m_val = hash_data(m) # Compute hash of the data\n r = random.randint(1, 2**256)\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"Committing: Data = {m}\\nHashed Value = {m_val}\\nRandom Value = {r}\\nComputed Commitment = {c}\\n\"\n )\n return c, m_val, r\n\n def open(self, c, m_val, r):\n \"\"\"\n Verify a commitment using the original message hash and randomness.\n\n This method recomputes the commitment using the public parameters and compares it with\n the provided commitment to check its validity.\n\n Parameters:\n - c (ECC.EccPoint): The commitment point to verify.\n - m_val (int): The integer value of the hashed message used in the commitment.\n - r (int): The random number used in the commitment.\n\n Returns:\n - bool: True if the verification succeeds (commitment is valid), False otherwise.\n\n Side Effects:\n - This method will print the verification details to the console.\n\n Raises:\n - Exception: If the verification calculation fails.\n \"\"\"\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n computed_c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"\\nOpening: Hashed Value = {m_val}\\nRandom Value = {r}\\nRecomputed Commitment = {computed_c}\\nOriginal Commitment = {c}\"\n )\n return computed_c == c" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "hex_to_ecc_point", "path": "storage/shared/ecc.py", "snippet": "def hex_to_ecc_point(hex_str, curve):\n \"\"\"\n Convert a hexadecimal string back into an elliptic curve point.\n\n This function is typically used to deserialize an ECC point that has been transmitted or stored as a hex string.\n\n Parameters:\n - hex_str (str): The hex string representing an elliptic curve point.\n - curve (str): The name of the elliptic curve the point belongs to.\n\n Returns:\n - ECC.EccPoint: The elliptic curve point represented by the hex string.\n\n Raises:\n - ValueError: If the hex string is not properly formatted or does not represent a valid point on the specified curve.\n \"\"\"\n point_str = binascii.unhexlify(hex_str).decode()\n x, y = map(int, point_str.split(\",\"))\n return ECC.EccPoint(x, y, curve=curve)" }, { "identifier": "MerkleTree", "path": "storage/shared/merkle.py", "snippet": "class MerkleTree(object):\n \"\"\"\n Represents a Merkle Tree, a data structure used for efficiently summarizing and verifying the\n integrity of large sets of data. 
The Merkle Tree is a binary tree where each leaf node is the hash\n of a data block and every non-leaf node is the hash of its children nodes.\n\n Attributes:\n hash_function (callable): The hash function used for generating hashes of the blocks\n and non-leaf nodes in the Merkle Tree.\n leaves (list): A list where each element is a bytearray representing the hashed value of a leaf.\n levels (list of lists): A list of lists where each sublist represents a level of the tree, starting\n from the leaves up to the root.\n is_ready (bool): Indicates whether the tree has been fully constructed and is ready to provide\n the Merkle root and proofs.\n\n Methods:\n add_leaf(values, do_hash=False): Adds one or multiple leaves to the tree. If `do_hash` is True,\n it will hash the values before adding them as leaves.\n get_leaf(index): Retrieves the hexadecimal string representation of a leaf at the given index.\n get_leaf_count(): Returns the total number of leaves in the tree.\n get_tree_ready_state(): Checks if the tree has been fully constructed.\n make_tree(): Constructs the Merkle Tree from the current leaves. This method must be called\n after all leaves are added and before retrieving the Merkle root or proofs.\n get_merkle_root(): Retrieves the Merkle root as a hexadecimal string if the tree is ready.\n get_proof(index): Generates a proof of inclusion for the leaf at the given index. This proof\n consists of a list of sibling hashes that, when combined with the target leaf,\n can reproduce the Merkle root.\n update_leaf(index, new_value): Updates the value of the leaf at the given index with `new_value`\n and recalculates the hashes up the tree to reflect this change.\n serialize(): Converts the Merkle Tree into a JSON-formatted string for storage or transmission.\n deserialize(json_data, hash_type=\"sha3_256\"): Reconstructs the Merkle Tree from a JSON string,\n using the specified hash function.\n\n Raises:\n Exception: If the `hash_type` provided during initialization is not supported or recognized.\n\n Example:\n # Create a Merkle tree using the SHA3-256 hash function\n merkle_tree = MerkleTree(hash_type='sha3_256')\n\n # Add data blocks (as leaves) to the tree\n merkle_tree.add_leaf(['block1', 'block2', 'block3'], do_hash=True)\n\n # Construct the tree\n merkle_tree.make_tree()\n\n # Retrieve the Merkle root\n root = merkle_tree.get_merkle_root()\n\n # Get proof of inclusion for the first data block\n proof = merkle_tree.get_proof(0)\n\n # Update the value of the first leaf and reconstruct the tree\n merkle_tree.update_leaf(0, 'new_block1_hashed_value')\n merkle_tree.make_tree()\n\n # Serialize the tree for storage\n serialized_tree = merkle_tree.serialize()\n\n # Deserialize the tree for later use\n deserialized_tree = MerkleTree.deserialize(serialized_tree, hash_type='sha3_256')\n\n Note:\n The hash_function attribute is determined by the hash_type parameter provided at initialization.\n Only hash types supported by the `hashlib` library can be used. 
Attempting to use an unsupported\n hash type will result in an exception.\n \"\"\"\n\n def __init__(self, hash_type=\"sha3_256\"):\n hash_type = hash_type.lower()\n if hash_type in [\"sha3_256\"]:\n self.hash_function = getattr(hashlib, hash_type)\n else:\n raise Exception(\"`hash_type` {} nor supported\".format(hash_type))\n\n self.reset_tree()\n\n def __eq__(self, other):\n if not isinstance(other, MerkleTree):\n return False\n return self.serialize() == other.serialize()\n\n def _to_hex(self, x):\n try: # python3\n return x.hex()\n except: # python2\n return binascii.hexlify(x)\n\n def reset_tree(self):\n self.leaves = list()\n self.levels = None\n self.is_ready = False\n\n def add_leaf(self, values, do_hash=False):\n self.is_ready = False\n # check if single leaf\n if not isinstance(values, tuple) and not isinstance(values, list):\n values = [values]\n for v in values:\n if do_hash:\n v = v.encode(\"utf-8\")\n v = self.hash_function(v).hexdigest()\n v = bytearray.fromhex(v)\n self.leaves.append(v)\n\n def get_leaf(self, index):\n return self._to_hex(self.leaves[index])\n\n def get_leaf_count(self):\n return len(self.leaves)\n\n def get_tree_ready_state(self):\n return self.is_ready\n\n def _calculate_next_level(self):\n solo_leave = None\n N = len(self.levels[0]) # number of leaves on the level\n if N % 2 == 1: # if odd number of leaves on the level\n solo_leave = self.levels[0][-1]\n N -= 1\n\n new_level = []\n for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):\n new_level.append(self.hash_function(l + r).digest())\n if solo_leave is not None:\n new_level.append(solo_leave)\n self.levels = [\n new_level,\n ] + self.levels # prepend new level\n\n def make_tree(self):\n \"\"\"\n Constructs the Merkle Tree from the leaves that have been added.\n\n This must be called after adding all the leaves and before calling\n get_merkle_root or get_proof to ensure the tree is constructed.\n \"\"\"\n self.is_ready = False\n if self.get_leaf_count() > 0:\n self.levels = [\n self.leaves,\n ]\n while len(self.levels[0]) > 1:\n self._calculate_next_level()\n self.is_ready = True\n\n def get_merkle_root(self):\n if self.is_ready:\n if self.levels is not None:\n return self._to_hex(self.levels[0][0])\n else:\n return None\n else:\n return None\n\n def get_proof(self, index):\n \"\"\"\n Generates the proof for the existence of a leaf at the specified index within the Merkle Tree.\n\n A Merkle proof is a collection of sibling hashes on the path from a leaf to the root of the tree.\n This proof can be used to independently verify that a leaf is indeed part of the Merkle tree without\n needing the entire tree. Each element of the proof shows the direction ('left' or 'right') and the\n corresponding hash that pairs with the path to the root.\n\n Parameters:\n index (int): The index of the target leaf for which to generate the Merkle proof. The index must\n correspond to the position of the leaf in the original list of leaves when the tree\n was constructed.\n\n Returns:\n list of dicts: A list where each dictionary contains a single key-value pair. The key is either\n 'left' or 'right', indicating the side of the sibling hash, and the value is a\n string representing the hexadecimal hash value of the sibling. 
If the tree is not\n ready or the index is out of bounds, None is returned.\n\n Raises:\n IndexError: If the index provided is not within the range of the leaves in the tree.\n ValueError: If the tree has not been constructed by calling `make_tree` method, or the index\n is not an integer.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree` and has been populated with leaves and made ready\n proof = merkle_tree.get_proof(2)\n print(proof) # Outputs something like [{'left': 'abcd...'}, {'right': 'ef01...'}]\n\n Note:\n The Merkle proof is only valid if the tree is in the ready state (`is_ready` attribute is True),\n which occurs after the `make_tree` method has been called. If the tree is not ready or the index\n is not valid, the method will return None.\n \"\"\"\n if self.levels is None:\n return None\n elif not self.is_ready or index > len(self.leaves) - 1 or index < 0:\n return None\n else:\n proof = []\n for x in range(len(self.levels) - 1, 0, -1):\n level_len = len(self.levels[x])\n if (index == level_len - 1) and (\n level_len % 2 == 1\n ): # skip if this is an odd end node\n index = int(index / 2.0)\n continue\n is_right_node = index % 2\n sibling_index = index - 1 if is_right_node else index + 1\n sibling_pos = \"left\" if is_right_node else \"right\"\n sibling_value = self._to_hex(self.levels[x][sibling_index])\n proof.append({sibling_pos: sibling_value})\n index = int(index / 2.0)\n return proof\n\n def update_leaf(self, index, new_value):\n \"\"\"\n Updates the value of a leaf at a given index in the Merkle Tree and recalculates the hashes along\n the path from the updated leaf to the root of the tree to reflect the change.\n\n This method allows the Merkle Tree to maintain integrity by ensuring that any updates to the leaf\n nodes are propagated upwards, resulting in a new Merkle root that represents the current state of\n the leaves.\n\n Parameters:\n index (int): The index of the leaf to update. The index is zero-based and must be less than\n the number of leaves in the tree.\n new_value (str): The new value in hexadecimal format to which the leaf should be updated. This\n value should be a valid hexadecimal string that represents the hashed data\n if hashing was applied to the leaves upon tree construction.\n\n Returns:\n None\n\n Raises:\n ValueError: If the tree is not ready for updates (i.e., `is_ready` is False), if the index is\n not an integer, if the new_value is not a hexadecimal string, or if the index is\n out of bounds (less than 0 or greater than or equal to the number of leaves).\n IndexError: If the index is out of the range of current leaves.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree`, populated with leaves and made ready.\n merkle_tree.update_leaf(0, 'a1b2c3d4e5f67890')\n # The leaf at index 0 is updated, and changes are propagated to the root.\n\n Note:\n The tree must have been constructed and be in a ready state before calling this method. 
If the\n tree has not been made by calling the `make_tree` method, or the index is invalid, this method\n will not perform an update and will return None.\n \"\"\"\n if not self.is_ready:\n return None\n new_value = bytearray.fromhex(new_value)\n self.levels[-1][index] = new_value\n for x in range(len(self.levels) - 1, 0, -1):\n parent_index = index // 2\n left_child = self.levels[x][parent_index * 2]\n try:\n right_child = self.levels[x][parent_index * 2 + 1]\n except IndexError:\n right_child = bytearray()\n self.levels[x - 1][parent_index] = self.hash_function(\n left_child + right_child\n ).digest()\n index = parent_index\n\n def serialize(self):\n \"\"\"\n Serializes the MerkleTree object into a JSON string.\n \"\"\"\n # Convert the bytearray leaves and levels to hex strings for serialization\n leaves = [self._to_hex(leaf) for leaf in self.leaves]\n levels = None\n if self.levels is not None:\n levels = []\n for level in self.levels:\n levels.append([self._to_hex(item) for item in level])\n\n # Construct a dictionary with the MerkleTree properties\n merkle_tree_data = {\n \"leaves\": leaves,\n \"levels\": levels,\n \"is_ready\": self.is_ready,\n }\n\n # Convert the dictionary to a JSON string\n return json.dumps(merkle_tree_data)\n\n @classmethod\n def deserialize(cls, json_data, hash_type=\"sha3_256\"):\n \"\"\"\n Deserializes the JSON string into a MerkleTree object.\n \"\"\"\n # Convert the JSON string back to a dictionary\n merkle_tree_data = json.loads(json_data)\n\n # Create a new MerkleTree object\n m_tree = cls(hash_type)\n\n # Convert the hex strings back to bytearrays and set the leaves and levels\n m_tree.leaves = [bytearray.fromhex(leaf) for leaf in merkle_tree_data[\"leaves\"]]\n if merkle_tree_data[\"levels\"] is not None:\n m_tree.levels = []\n for level in merkle_tree_data[\"levels\"]:\n m_tree.levels.append([bytearray.fromhex(item) for item in level])\n m_tree.is_ready = merkle_tree_data[\"is_ready\"]\n\n return m_tree" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "b64_decode", "path": "storage/shared/utils.py", "snippet": "def b64_decode(data: bytes, decode_hex: bool = False, encrypted: bool = False):\n \"\"\"\n Decodes a base64 string into a list or dictionary. If decode_hex is True, it converts any hexadecimal strings\n within the data back into bytes.\n\n Args:\n data (bytes or str): The base64 encoded data to be decoded.\n decode_hex (bool): A flag to indicate whether to decode hex strings into bytes. Defaults to False.\n\n Returns:\n list or dict: The decoded data. 
Returns a list if the original encoded data was a list, and a dict if it was a dict.\n\n Raises:\n ValueError: If the input is not properly base64 encoded or if hex decoding fails.\n \"\"\"\n data = data.decode(\"utf-8\") if isinstance(data, bytes) else data\n decoded_data = json.loads(\n base64.b64decode(data) if encrypted else base64.b64decode(data).decode(\"utf-8\")\n )\n if decode_hex:\n try:\n decoded_data = (\n [bytes.fromhex(d) for d in decoded_data]\n if isinstance(decoded_data, list)\n else {k: bytes.fromhex(v) for k, v in decoded_data.items()}\n )\n except:\n pass\n return decoded_data" }, { "identifier": "chunk_data", "path": "storage/shared/utils.py", "snippet": "def chunk_data(data: bytes, chunksize: int) -> List[bytes]:\n \"\"\"\n Generator function that chunks the given data into pieces of a specified size.\n\n Args:\n data (bytes): The binary data to be chunked.\n chunksize (int): The size of each chunk in bytes.\n\n Yields:\n bytes: A chunk of the data with the size equal to 'chunksize' or the remaining size of data.\n\n Raises:\n ValueError: If 'chunksize' is less than or equal to 0.\n \"\"\"\n for i in range(0, len(data), chunksize):\n yield data[i : i + chunksize]" }, { "identifier": "safe_key_search", "path": "storage/shared/utils.py", "snippet": "async def safe_key_search(database: aioredis.Redis, pattern: str) -> List[str]:\n \"\"\"\n Safely search for keys in the database that doesn't block.\n `scan_iter` uses cursor under the hood.\n \"\"\"\n return [key for key in await database.scan_iter(pattern)]" }, { "identifier": "run", "path": "storage/miner/run.py", "snippet": "def run(self):\n \"\"\"\n Initiates and manages the main loop for the miner on the Bittensor network.\n\n This function performs the following primary tasks:\n 1. Check for registration on the Bittensor network.\n 2. Attaches the miner's forward, blacklist, and priority functions to its axon.\n 3. Starts the miner's axon, making it active on the network.\n 4. Regularly updates the metagraph with the latest network state.\n 5. Optionally sets weights on the network, defining how much trust to assign to other nodes.\n 6. Handles graceful shutdown on keyboard interrupts and logs unforeseen errors.\n\n The miner continues its operations until `should_exit` is set to True or an external interruption occurs.\n During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its\n knowledge of the network (metagraph), and sets its weights. 
This process ensures the miner remains active\n and up-to-date with the network's latest state.\n\n Note:\n - The function leverages the global configurations set during the initialization of the miner.\n - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.\n\n Raises:\n KeyboardInterrupt: If the miner is stopped by a manual interruption.\n Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis.\n \"\"\"\n block_handler_substrate = SubstrateInterface(\n ss58_format=bt.__ss58_format__,\n use_remote_preset=True,\n url=self.subtensor.chain_endpoint,\n type_registry=bt.__type_registry__,\n )\n\n netuid = self.config.netuid\n\n # --- Check for registration.\n if not self.subtensor.is_hotkey_registered(\n netuid=netuid,\n hotkey_ss58=self.wallet.hotkey.ss58_address,\n ):\n bt.logging.error(\n f\"Wallet: {self.wallet} is not registered on netuid {netuid}\"\n f\"Please register the hotkey using `btcli subnets register` before trying again\"\n )\n exit()\n\n tempo = block_handler_substrate.query(\n module=\"SubtensorModule\", storage_function=\"Tempo\", params=[netuid]\n ).value\n\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n should_retry = False\n\n def handler(obj, update_nr, subscription_id):\n current_block = obj[\"header\"][\"number\"]\n block_hash = block_handler_substrate.get_block_hash(current_block)\n bt.logging.debug(f\"New block #{current_block}\")\n\n bt.logging.debug(\n f\"Blocks since epoch: {(current_block + netuid + 1) % (tempo + 1)}\"\n )\n\n nonlocal last_extrinsic_hash\n nonlocal checked_extrinsics_count\n nonlocal should_retry\n\n if last_extrinsic_hash != None:\n try:\n receipt = block_handler_substrate.retrieve_extrinsic_by_hash(\n block_hash, last_extrinsic_hash\n )\n bt.logging.debug(\n f\"Last set-weights call: {'Success' if receipt.is_success else format('Failure, reason: %s', receipt.error_message['name'] if receipt.error_message != None else 'nil')}\"\n )\n\n should_retry = False\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n except Exception as e:\n checked_extrinsics_count += 1\n bt.logging.debug(f\"An error occurred, extrinsic not found in block.\")\n finally:\n if checked_extrinsics_count >= 20:\n should_retry = True\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n\n if ((current_block + netuid + 1) % (tempo + 1) == 0) or should_retry:\n bt.logging.info(\n f\"New epoch started, setting weights at block {current_block}\"\n )\n with self.subtensor.substrate as substrate:\n call = substrate.compose_call(\n call_module=\"SubtensorModule\",\n call_function=\"set_weights\",\n call_params={\n \"dests\": [self.my_subnet_uid],\n \"weights\": [65535],\n \"netuid\": netuid,\n \"version_key\": 1,\n },\n )\n\n # Period dictates how long the extrinsic will stay as part of waiting pool\n extrinsic = substrate.create_signed_extrinsic(\n call=call, keypair=self.wallet.hotkey, era={\"period\": 1000}\n )\n\n dry_run = runtime_call(\n substrate=substrate,\n api=\"TaggedTransactionQueue\",\n method=\"validate_transaction\",\n params=[\"InBlock\", extrinsic, block_hash],\n block_hash=block_hash,\n )\n bt.logging.debug(dry_run)\n\n response = substrate.submit_extrinsic(\n extrinsic,\n wait_for_inclusion=False,\n wait_for_finalization=False,\n )\n\n result_data = substrate.rpc_request(\"author_pendingExtrinsics\", [])\n for extrinsic_data in result_data[\"result\"]:\n extrinsic = substrate.runtime_config.create_scale_object(\n \"Extrinsic\", 
metadata=substrate.metadata\n )\n extrinsic.decode(\n ScaleBytes(extrinsic_data),\n check_remaining=substrate.config.get(\"strict_scale_decode\"),\n )\n\n if extrinsic.value[\"extrinsic_hash\"] == response.extrinsic_hash:\n bt.logging.debug(\n \"Weights transaction is in the pending transaction pool\"\n )\n\n last_extrinsic_hash = response.extrinsic_hash\n should_retry = False\n\n # --- Update the miner storage information periodically.\n if not should_retry:\n update_storage_stats(self)\n bt.logging.debug(\"Storage statistics updated...\")\n\n if self.should_exit:\n return True\n\n block_handler_substrate.subscribe_block_headers(handler)" }, { "identifier": "set_weights", "path": "storage/miner/set_weights.py", "snippet": "def set_weights_for_miner(\n subtensor: \"bt.subtensor\",\n netuid: int,\n uid: int,\n wallet: \"bt.wallet\",\n metagraph: \"bt.metagraph\",\n wandb_on: bool = False,\n tempo: int = 360,\n wait_for_inclusion: bool = False,\n wait_for_finalization: bool = False,\n) -> bool:" }, { "identifier": "compute_subsequent_commitment", "path": "storage/miner/utils.py", "snippet": "def compute_subsequent_commitment(data, previous_seed, new_seed, verbose=False):\n \"\"\"\n Computes a new commitment based on provided data and a change from an old seed to a new seed.\n This function is typically used in cryptographic operations to update commitments without\n altering the underlying data.\n\n Parameters:\n - data: The original data for which the commitment is being updated.\n - previous_seed: The seed used in the previous commitment.\n - new_seed: The seed to be used for the new commitment.\n - verbose (bool): If True, additional debug information will be printed. Defaults to False.\n\n Returns:\n - A tuple containing the new commitment and the proof of the old commitment.\n\n If verbose is set to True, debug information about the types and contents of the parameters\n will be printed to aid in debugging.\n \"\"\"\n if verbose:\n bt.logging.debug(\"IN COMPUTE SUBESEQUENT COMMITMENT\")\n bt.logging.debug(\"type of data :\", type(data))\n bt.logging.debug(\"type of prev_seed:\", type(previous_seed))\n bt.logging.debug(\"type of new_seed :\", type(new_seed))\n proof = hash_data(data + previous_seed)\n return hash_data(str(proof).encode(\"utf-8\") + new_seed), proof" }, { "identifier": "save_data_to_filesystem", "path": "storage/miner/utils.py", "snippet": "def save_data_to_filesystem(data, directory, filename):\n \"\"\"\n Saves data to the filesystem at the specified directory and filename. 
If the directory does\n not exist, it is created.\n\n Parameters:\n - data: The data to be saved.\n - directory (str): The directory path where the data should be saved.\n - filename (str): The name of the file to save the data in.\n\n Returns:\n - file_path (str): The full path to the saved file.\n\n This function is useful for persisting data to the disk.\n \"\"\"\n # Ensure the directory exists\n directory = os.path.expanduser(directory)\n os.makedirs(directory, exist_ok=True)\n file_path = os.path.join(directory, filename)\n with open(file_path, \"wb\") as file:\n file.write(data)\n return file_path" }, { "identifier": "load_from_filesystem", "path": "storage/miner/utils.py", "snippet": "def load_from_filesystem(filepath):\n \"\"\"\n Loads data from a file in the filesystem.\n\n Parameters:\n - filepath (str): The path to the file from which data is to be loaded.\n\n Returns:\n - data: The data read from the file.\n\n This function is a straightforward utility for reading binary data from a file.\n \"\"\"\n with open(os.path.expanduser(filepath), \"rb\") as file:\n data = file.read()\n return data" }, { "identifier": "commit_data_with_seed", "path": "storage/miner/utils.py", "snippet": "def commit_data_with_seed(committer, data_chunks, n_chunks, seed):\n \"\"\"\n Commits chunks of data with a seed using a Merkle tree structure to create a proof of\n integrity for each chunk. This function is used in environments where the integrity\n and order of data need to be verifiable.\n\n Parameters:\n - committer: The committing object, which should have a commit method.\n - data_chunks (list): A list of data chunks to be committed.\n - n_chunks (int): The number of chunks expected to be committed.\n - seed: A seed value that is combined with data chunks before commitment.\n\n Returns:\n - randomness (list): A list of randomness values associated with each data chunk's commitment.\n - chunks (list): The list of original data chunks that were committed.\n - points (list): A list of commitment points in hex format.\n - merkle_tree (MerkleTree): A Merkle tree constructed from the commitment points.\n\n This function handles the conversion of commitment points to hex format and adds them to the\n Merkle tree. 
The completed tree represents the combined commitments.\n \"\"\"\n merkle_tree = MerkleTree()\n\n # Commit each chunk of data\n randomness, chunks, points = [None] * n_chunks, [None] * n_chunks, [None] * n_chunks\n for index, chunk in enumerate(data_chunks):\n c, m_val, r = committer.commit(chunk + str(seed).encode())\n c_hex = ecc_point_to_hex(c)\n randomness[index] = r\n chunks[index] = chunk\n points[index] = c_hex\n merkle_tree.add_leaf(c_hex)\n\n # Create the tree from the leaves\n merkle_tree.make_tree()\n return randomness, chunks, points, merkle_tree" }, { "identifier": "init_wandb", "path": "storage/miner/utils.py", "snippet": "def init_wandb(self, reinit=False):\n \"\"\"Starts a new wandb run.\"\"\"\n tags = [\n self.wallet.hotkey.ss58_address,\n storage.__version__,\n str(storage.__spec_version__),\n f\"netuid_{self.metagraph.netuid}\",\n ]\n\n if self.config.mock:\n tags.append(\"mock\")\n\n wandb_config = {\n key: copy.deepcopy(self.config.get(key, None))\n for key in (\"neuron\", \"reward\", \"netuid\", \"wandb\")\n }\n\n if wandb_config[\"neuron\"] is not None:\n wandb_config[\"neuron\"].pop(\"full_path\", None)\n\n self.wandb = wandb.init(\n anonymous=\"allow\",\n reinit=reinit,\n project=self.config.wandb.project_name,\n entity=self.config.wandb.entity,\n config=wandb_config,\n mode=\"offline\" if self.config.wandb.offline else \"online\",\n dir=self.config.neuron.full_path\n if self.config.neuron is not None\n else \"wandb_logs\",\n tags=tags,\n notes=self.config.wandb.notes,\n )\n bt.logging.success(\n prefix=\"Started a new wandb run\",\n sufix=f\"<blue> {self.wandb.name} </blue>\",\n )" }, { "identifier": "get_directory_size", "path": "storage/miner/utils.py", "snippet": "def get_directory_size(path):\n \"\"\"\n Calculates the total size of files in a specified directory.\n\n This function traverses the directory at the given path, including all subdirectories, and sums up the size\n of each file to calculate the total directory size.\n\n Args:\n path (str): The file path of the directory whose size is to be calculated.\n\n Returns:\n int: The total size of the directory in bytes (B).\n\n Usage:\n directory_size_gb = get_directory_size('/path/to/directory')\n \"\"\"\n total_size = 0\n path = os.path.expanduser(path)\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n return total_size" }, { "identifier": "get_free_disk_space", "path": "storage/miner/utils.py", "snippet": "def get_free_disk_space(path=\".\"):\n \"\"\"\n Retrieves the free disk space for the drive containing the specified path.\n\n This function provides the free disk space of the drive on which the specified path resides.\n It's useful for understanding the storage capacity and usage of the system where the miner is running.\n\n Args:\n path (str): A file path on the drive whose free disk space is to be fetched. 
Typically, you can\n provide the root path ('/') to get the stats for the primary drive.\n\n Returns:\n int: The free space on the disk in bytes (B).\n\n Usage:\n free_disk_space_gb = get_free_disk_space('/')\n \"\"\"\n stats = get_disk_space_stats(path)\n free = stats.get(\"free_bytes\", 0)\n return free" }, { "identifier": "update_storage_stats", "path": "storage/miner/utils.py", "snippet": "def update_storage_stats(self):\n \"\"\"\n Updates the miner's storage statistics.\n\n This function updates the miner's storage statistics, including the free disk space, current storage usage,\n and percent disk usage. It's useful for understanding the storage capacity and usage of the system where\n the miner is running.\n \"\"\"\n\n self.free_memory = get_free_disk_space()\n bt.logging.info(f\"Free memory: {self.free_memory} bytes\")\n self.current_storage_usage = get_directory_size(self.config.database.directory)\n bt.logging.info(f\"Miner storage usage: {self.current_storage_usage} bytes\")\n self.percent_disk_usage = self.current_storage_usage / self.free_memory\n bt.logging.info(f\"Miner % disk usage : {100 * self.percent_disk_usage:.3f}%\")" }, { "identifier": "config", "path": "storage/miner/config.py", "snippet": "def config(cls):\n parser = argparse.ArgumentParser()\n bt.subtensor.add_args(parser)\n bt.logging.add_args(parser)\n bt.wallet.add_args(parser)\n bt.axon.add_args(parser)\n cls.add_args(parser)\n return bt.config(parser)" }, { "identifier": "check_config", "path": "storage/miner/config.py", "snippet": "def check_config(cls, config: \"bt.Config\"):\n r\"\"\"Checks/validates the config namespace object.\"\"\"\n bt.logging.check_config(config)\n\n if config.mock:\n config.wallet._mock = True\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n full_path = os.path.expanduser(\n \"{}/{}/{}/netuid{}/{}\".format(\n config.logging.logging_dir,\n config.wallet.name,\n config.wallet.hotkey,\n config.netuid,\n config.miner.name,\n )\n )\n log_path = os.path.join(full_path, \"logs\", timestamp)\n\n config.miner.log_path = os.path.expanduser(log_path)\n config.miner.full_path = os.path.expanduser(full_path)\n\n if not os.path.exists(config.miner.full_path):\n os.makedirs(config.miner.full_path, exist_ok=True)\n if not os.path.exists(config.miner.log_path):\n os.makedirs(config.miner.log_path, exist_ok=True)\n\n if not config.miner.dont_save_events:\n # Add custom event logger for the events.\n logger.level(\"EVENTS\", no=38, icon=\"📝\")\n logger.add(\n config.miner.full_path + \"/\" + \"EVENTS.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"EVENTS\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"INFO.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"INFO\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"DEBUG.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"DEBUG\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"TRACE.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"TRACE\",\n format=\"{time:YYYY-MM-DD at 
HH:mm:ss} | {level} | {message}\",\n )" }, { "identifier": "add_args", "path": "storage/miner/config.py", "snippet": "def add_args(cls, parser):\n parser.add_argument(\"--netuid\", type=int, default=21, help=\"The chain subnet uid.\")\n parser.add_argument(\"--test\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--miner.name\",\n type=str,\n help=\"Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name. \",\n default=\"core_storage_miner\",\n )\n parser.add_argument(\n \"--miner.device\",\n type=str,\n help=\"Device to run the validator on.\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--miner.verbose\", default=False, action=\"store_true\")\n\n parser.add_argument(\n \"--database.host\", default=\"localhost\", help=\"The host of the redis database.\"\n )\n parser.add_argument(\n \"--database.port\",\n type=int,\n default=6379,\n help=\"The port of the redis database.\",\n )\n parser.add_argument(\n \"--database.index\",\n type=int,\n default=0,\n help=\"The index of the redis database.\",\n )\n parser.add_argument(\n \"--database.directory\",\n default=\"~/.data\",\n help=\"The directory to store data in.\",\n )\n\n # Run config.\n parser.add_argument(\n \"--miner.set_weights_wait_for_inclusion\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to enter a block\",\n default=False,\n )\n parser.add_argument(\n \"--miner.set_weights_wait_for_finalization\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to be finalized on the chain\",\n default=False,\n )\n parser.add_argument(\n \"--miner.seconds_to_wait_to_log_presence_message\",\n type=int,\n help=\"How many seconds to wait before logging a presence message.\",\n default=4,\n )\n\n # Blacklist.\n parser.add_argument(\n \"--miner.blacklist.blacklist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Blacklist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.whitelist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Whitelist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.force_validator_permit\",\n action=\"store_true\",\n help=\"Only allow requests from validators\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.allow_non_registered\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.minimum_stake_requirement\",\n type=float,\n help=\"Minimum stake requirement\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.blacklist.min_request_period\",\n type=int,\n help=\"Time period (in minute) to serve a maximum of 50 requests for each hotkey\",\n default=5,\n )\n\n # Priority.\n parser.add_argument(\n \"--miner.priority.default\",\n type=float,\n help=\"Default priority of non-registered requests\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.priority.time_stake_multiplicate\",\n type=int,\n help=\"Time (in minute) it takes to make the stake twice more important in the priority queue\",\n default=10,\n )\n parser.add_argument(\n \"--miner.priority.len_request_timestamps\",\n type=int,\n help=\"Number of historic request timestamps to record\",\n default=50,\n )\n # Switches.\n parser.add_argument(\n \"--miner.no_set_weights\",\n action=\"store_true\",\n help=\"If True, the miner does not set weights.\",\n default=False,\n )\n parser.add_argument(\n 
\"--miner.no_serve\",\n action=\"store_true\",\n help=\"If True, the miner doesnt serve the axon.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.no_start_axon\",\n action=\"store_true\",\n help=\"If True, the miner doesnt start the axon.\",\n default=False,\n )\n\n # Mocks.\n parser.add_argument(\n \"--miner.mock_subtensor\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n\n # Wandb args\n parser.add_argument(\n \"--wandb.off\", action=\"store_true\", help=\"Turn off wandb.\", default=False\n )\n parser.add_argument(\n \"--wandb.project_name\",\n type=str,\n help=\"The name of the project where you are sending the new run.\",\n default=\"philanthropic-thunder\",\n )\n parser.add_argument(\n \"--wandb.entity\",\n type=str,\n help=\"An entity is a username or team name where youre sending runs.\",\n default=\"philanthrope\",\n )\n parser.add_argument(\n \"--wandb.offline\",\n action=\"store_true\",\n help=\"Runs wandb in offline mode.\",\n default=False,\n )\n parser.add_argument(\n \"--wandb.weights_step_length\",\n type=int,\n help=\"How many steps before we log the weights.\",\n default=10,\n )\n parser.add_argument(\n \"--wandb.run_step_length\",\n type=int,\n help=\"How many steps before we rollover to a new run.\",\n default=1500,\n )\n parser.add_argument(\n \"--wandb.notes\",\n type=str,\n help=\"Notes to add to the wandb run.\",\n default=\"\",\n )" }, { "identifier": "store_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def store_chunk_metadata(r, chunk_hash, filepath, hotkey, size, seed):\n \"\"\"\n Stores the metadata of a chunk in a Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): Miner hotkey associated with the chunk.\n size (int): The size of the chunk.\n seed (str): The seed associated with the chunk.\n\n This function stores the filepath, size (as a string), and seed for the given chunk hash.\n \"\"\"\n # Ensure that all data are in the correct format\n metadata = {\n \"filepath\": filepath,\n \"hotkey\": hotkey,\n \"size\": str(size), # Convert size to string\n \"seed\": seed, # Store seed directly\n }\n\n # Use hmset (or hset which is its modern equivalent) to store the hash\n for key, value in metadata.items():\n await r.hset(chunk_hash, key, value)" }, { "identifier": "update_seed_info", "path": "storage/miner/database.py", "snippet": "async def update_seed_info(r, chunk_hash, hotkey, seed):\n \"\"\"\n Updates the seed information for a specific chunk in the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): The caller hotkey value to be updated.\n seed (str): The new seed value to be updated.\n\n This function updates the seed information for the specified chunk hash.\n \"\"\"\n # Update the existing seed information\n await r.hset(chunk_hash, \"seed\", seed)\n await r.hset(chunk_hash, \"hotkey\", hotkey)" }, { "identifier": "get_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def get_chunk_metadata(r, chunk_hash):\n \"\"\"\n Retrieves the metadata for a specific chunk from the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n\n Returns:\n dict: A dictionary containing the chunk's metadata, including filepath, size, and seed.\n Size is converted to an 
integer, and seed is decoded from bytes to a string.\n \"\"\"\n metadata = await r.hgetall(chunk_hash)\n if metadata:\n metadata[b\"size\"] = int(metadata[b\"size\"])\n metadata[b\"seed\"] = metadata[b\"seed\"].decode(\"utf-8\")\n return metadata" } ]
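The three database helpers above are thin async wrappers around Redis hash commands. As a reading aid (not part of this record), the sketch below shows a plausible round-trip through them; the connection settings mirror the StrictRedis constructor used later in the miner class, and the hash, filepath, hotkey, size and seed values are illustrative placeholders.

# Hedged sketch: exercise store_chunk_metadata / update_seed_info /
# get_chunk_metadata against a local Redis. All literal values below are
# assumptions for illustration, not data taken from this record.
import asyncio
import aioredis

from storage.miner.database import (
    store_chunk_metadata,
    update_seed_info,
    get_chunk_metadata,
)


async def metadata_roundtrip():
    r = aioredis.StrictRedis(host="localhost", port=6379, db=0)

    chunk_hash = "example_chunk_hash"  # in the miner this is hash_data(encrypted_bytes)
    await store_chunk_metadata(
        r, chunk_hash, "~/.data/example_chunk_hash", "example_hotkey", 1024, "seed-0"
    )

    # A later challenge from a validator rotates the seed for the same chunk.
    await update_seed_info(r, chunk_hash, "example_hotkey", "seed-1")

    # hgetall returns bytes keys/values; size and seed are decoded by the helper.
    meta = await get_chunk_metadata(r, chunk_hash)
    print(meta[b"filepath"], meta[b"size"], meta[b"seed"])


if __name__ == "__main__":
    asyncio.run(metadata_roundtrip())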
import os
import sys
import copy
import json
import time
import torch
import typing
import base64
import asyncio
import aioredis
import argparse
import threading
import traceback
import bittensor as bt
import storage
from collections import defaultdict
from Crypto.Random import get_random_bytes
from typing import Dict
from pprint import pprint, pformat
from storage.shared.ecc import (
    hash_data,
    setup_CRS,
    ECCommitment,
    ecc_point_to_hex,
    hex_to_ecc_point,
)
from storage.shared.merkle import (
    MerkleTree,
)
from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search
from storage.miner import (
    run,
    set_weights,
)
from storage.miner.utils import (
    compute_subsequent_commitment,
    save_data_to_filesystem,
    load_from_filesystem,
    commit_data_with_seed,
    init_wandb,
    get_directory_size,
    get_free_disk_space,
    update_storage_stats,
)
from storage.miner.config import (
    config,
    check_config,
    add_args,
)
from storage.miner.database import (
    store_chunk_metadata,
    update_seed_info,
    get_chunk_metadata,
)
15,119
synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment( hex_to_ecc_point(synapse.g, synapse.curve), hex_to_ecc_point(synapse.h, synapse.curve), ) bt.logging.trace(f"entering commit()") c, m_val, r = committer.commit(encrypted_byte_data + str(synapse.seed).encode()) if self.config.miner.verbose: bt.logging.debug(f"committer: {committer}") bt.logging.debug(f"encrypted_byte_data: {encrypted_byte_data}") bt.logging.debug(f"c: {c}") bt.logging.debug(f"m_val: {m_val}") bt.logging.debug(f"r: {r}") # Send back some proof that we stored the data synapse.randomness = r synapse.commitment = ecc_point_to_hex(c) bt.logging.trace(f"signed commitment: {synapse.commitment}") # Initialize the commitment hash with the initial commitment for chained proofs synapse.commitment_hash = str(m_val) bt.logging.trace(f"initial commitment_hash: {synapse.commitment_hash}") if self.config.miner.verbose: bt.logging.debug(f"signed m_val: {synapse.signature.hex()}") bt.logging.debug(f"type(seed): {type(synapse.seed)}") bt.logging.debug(f"initial commitment_hash: {synapse.commitment_hash}") bt.logging.info( f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. 
Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." ) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if prev_seed == None: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering comput_subsequent_commitment()...") new_seed = synapse.seed.encode()
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach determiners which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port of external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start starts the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment( hex_to_ecc_point(synapse.g, synapse.curve), hex_to_ecc_point(synapse.h, synapse.curve), ) bt.logging.trace(f"entering commit()") c, m_val, r = committer.commit(encrypted_byte_data + str(synapse.seed).encode()) if self.config.miner.verbose: bt.logging.debug(f"committer: {committer}") bt.logging.debug(f"encrypted_byte_data: {encrypted_byte_data}") bt.logging.debug(f"c: {c}") bt.logging.debug(f"m_val: {m_val}") bt.logging.debug(f"r: {r}") # Send back some proof that we stored the data synapse.randomness = r synapse.commitment = ecc_point_to_hex(c) bt.logging.trace(f"signed commitment: {synapse.commitment}") # Initialize the commitment hash with the initial commitment for chained proofs synapse.commitment_hash = str(m_val) bt.logging.trace(f"initial commitment_hash: {synapse.commitment_hash}") if self.config.miner.verbose: bt.logging.debug(f"signed m_val: {synapse.signature.hex()}") bt.logging.debug(f"type(seed): {type(synapse.seed)}") bt.logging.debug(f"initial commitment_hash: {synapse.commitment_hash}") bt.logging.info( f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. 
Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." ) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if prev_seed == None: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering comput_subsequent_commitment()...") new_seed = synapse.seed.encode()
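The code above stops just before the chained-commitment call. Purely as an illustration of steps 2 through 5 listed in the challenge docstring (chunk the data, commit each chunk with the seed, build the Merkle tree, extract one proof), here is a minimal sketch built on commit_data_with_seed from this record's context. It is not the repository's challenge() body: the (data, chunk_size) signature of chunk_data, the committer argument (an ECCommitment built from the validator's g and h points, as in store()), and the merkletools-style get_proof/get_merkle_root calls are all assumptions.

# Hedged sketch of the chunk-commit-prove flow described in the docstring.
import base64

from storage.shared.utils import chunk_data
from storage.miner.utils import commit_data_with_seed


def build_challenge_response(encrypted_data_bytes, committer, seed, chunk_size, chunk_index):
    # Step 2: split the stored blob into chunks of the requested size
    # (chunk_data's (data, chunk_size) signature is assumed).
    data_chunks = list(chunk_data(encrypted_data_bytes, chunk_size))

    # Steps 3-4: commit every chunk together with the validator seed and build
    # the Merkle tree over the hex-encoded commitment points.
    randomness, chunks, points, merkle_tree = commit_data_with_seed(
        committer, data_chunks, len(data_chunks), seed
    )

    # Step 5: return the requested chunk (base64 for transmission) plus the
    # proof material; get_proof/get_merkle_root are assumed merkletools-style.
    return {
        "data_chunk": base64.b64encode(chunks[chunk_index]).decode(),
        "randomness": randomness[chunk_index],
        "commitment": points[chunk_index],
        "merkle_proof": merkle_tree.get_proof(chunk_index),
        "merkle_root": merkle_tree.get_merkle_root(),
    }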
next_commitment, proof = compute_subsequent_commitment(
12
2023-10-26 18:54:47+00:00
24k
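Before the next record begins: the disk-usage helpers in this record's context (get_directory_size, get_free_disk_space) can also be exercised on their own. The short sketch below is a hedged, standalone version of the update_storage_stats calculation; the directory path is a placeholder, and the ratio follows that helper's convention of dividing current usage by free space.

# Hedged sketch: report miner storage usage with the helpers shown above.
from storage.miner.utils import get_directory_size, get_free_disk_space


def report_storage(directory="~/.data"):
    used_bytes = get_directory_size(directory)   # total size of stored chunks
    free_bytes = get_free_disk_space(directory)  # free space on the same drive
    percent_of_free = 100 * used_bytes / free_bytes if free_bytes else 0.0
    print(f"used={used_bytes} B, free={free_bytes} B, {percent_of_free:.3f}% of free space")
    return used_bytes, free_bytes


if __name__ == "__main__":
    report_storage()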
cpacker/MemGPT
memgpt/agent.py
[ { "identifier": "AgentState", "path": "memgpt/data_types.py", "snippet": "class AgentState:\n def __init__(\n self,\n name: str,\n user_id: uuid.UUID,\n persona: str, # the filename where the persona was originally sourced from\n human: str, # the filename where the human was originally sourced from\n llm_config: LLMConfig,\n embedding_config: EmbeddingConfig,\n preset: str,\n # (in-context) state contains:\n # persona: str # the current persona text\n # human: str # the current human text\n # system: str, # system prompt (not required if initializing with a preset)\n # functions: dict, # schema definitions ONLY (function code linked at runtime)\n # messages: List[dict], # in-context messages\n id: Optional[uuid.UUID] = None,\n state: Optional[dict] = None,\n created_at: Optional[str] = None,\n ):\n if id is None:\n self.id = uuid.uuid4()\n else:\n self.id = id\n assert isinstance(self.id, uuid.UUID), f\"UUID {self.id} must be a UUID type\"\n assert isinstance(user_id, uuid.UUID), f\"UUID {user_id} must be a UUID type\"\n\n # TODO(swooders) we need to handle the case where name is None here\n # in AgentConfig we autogenerate a name, not sure what the correct thing w/ DBs is, what about NounAdjective combos? Like giphy does? BoredGiraffe etc\n self.name = name\n self.user_id = user_id\n self.preset = preset\n self.persona = persona\n self.human = human\n\n self.llm_config = llm_config\n self.embedding_config = embedding_config\n\n self.created_at = created_at if created_at is not None else datetime.now()\n\n # state\n self.state = {} if not state else state" }, { "identifier": "Message", "path": "memgpt/data_types.py", "snippet": "class Message(Record):\n \"\"\"Representation of a message sent.\n\n Messages can be:\n - agent->user (role=='agent')\n - user->agent and system->agent (role=='user')\n - or function/tool call returns (role=='function'/'tool').\n \"\"\"\n\n def __init__(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n role: str,\n text: str,\n model: Optional[str] = None, # model used to make function call\n name: Optional[str] = None, # optional participant name\n created_at: Optional[str] = None,\n tool_calls: Optional[List[ToolCall]] = None, # list of tool calls requested\n tool_call_id: Optional[str] = None,\n embedding: Optional[np.ndarray] = None,\n id: Optional[uuid.UUID] = None,\n ):\n super().__init__(id)\n self.user_id = user_id\n self.agent_id = agent_id\n self.text = text\n self.model = model # model name (e.g. gpt-4)\n self.created_at = datetime.now().astimezone() if created_at is None else created_at\n\n # openai info\n assert role in [\"system\", \"assistant\", \"user\", \"tool\"]\n self.role = role # role (agent/user/function)\n self.name = name\n\n # tool (i.e. 
function) call info (optional)\n\n # if role == \"assistant\", this MAY be specified\n # if role != \"assistant\", this must be null\n assert tool_calls is None or isinstance(tool_calls, list)\n self.tool_calls = tool_calls\n\n # if role == \"tool\", then this must be specified\n # if role != \"tool\", this must be null\n if role == \"tool\":\n assert tool_call_id is not None\n else:\n assert tool_call_id is None\n self.tool_call_id = tool_call_id\n\n # embedding (optional)\n self.embedding = embedding\n\n # def __repr__(self):\n # pass\n\n @staticmethod\n def dict_to_message(\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n openai_message_dict: dict,\n model: Optional[str] = None, # model used to make function call\n allow_functions_style: bool = False, # allow deprecated functions style?\n ):\n \"\"\"Convert a ChatCompletion message object into a Message object (synced to DB)\"\"\"\n\n # If we're going from deprecated function form\n if openai_message_dict[\"role\"] == \"function\":\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert from 'function' response to a 'tool' response\n # NOTE: this does not conventionally include a tool_call_id, it's on the caster to provide it\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=\"tool\", # NOTE\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=openai_message_dict[\"tool_calls\"] if \"tool_calls\" in openai_message_dict else None,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n elif \"function_call\" in openai_message_dict and openai_message_dict[\"function_call\"] is not None:\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert openai_message_dict[\"role\"] == \"assistant\", openai_message_dict\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert a function_call (from an assistant message) into a tool_call\n # NOTE: this does not conventionally include a tool_call_id (ToolCall.id), it's on the caster to provide it\n tool_calls = [\n ToolCall(\n id=openai_message_dict[\"tool_call_id\"], # NOTE: unconventional source, not to spec\n tool_call_type=\"function\",\n function={\n \"name\": openai_message_dict[\"function_call\"][\"name\"],\n \"arguments\": openai_message_dict[\"function_call\"][\"arguments\"],\n },\n )\n ]\n\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=None, # NOTE: None, since this field is only non-null for role=='tool'\n )\n\n else:\n # Basic sanity check\n if openai_message_dict[\"role\"] == \"tool\":\n assert \"tool_call_id\" in openai_message_dict and openai_message_dict[\"tool_call_id\"] is not None, openai_message_dict\n else:\n if \"tool_call_id\" in openai_message_dict:\n assert openai_message_dict[\"tool_call_id\"] is None, openai_message_dict\n\n if \"tool_calls\" in openai_message_dict and openai_message_dict[\"tool_calls\"] is not None:\n assert openai_message_dict[\"role\"] == \"assistant\", 
openai_message_dict\n\n tool_calls = [\n ToolCall(id=tool_call[\"id\"], tool_call_type=tool_call[\"type\"], function=tool_call[\"function\"])\n for tool_call in openai_message_dict[\"tool_calls\"]\n ]\n else:\n tool_calls = None\n\n # If we're going from tool-call style\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n def to_openai_dict(self):\n \"\"\"Go from Message class to ChatCompletion message object\"\"\"\n\n # TODO change to pydantic casting, eg `return SystemMessageModel(self)`\n\n if self.role == \"system\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"user\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"assistant\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional fields, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n if self.tool_calls is not None:\n openai_message[\"tool_calls\"] = [tool_call.to_dict() for tool_call in self.tool_calls]\n\n elif self.role == \"tool\":\n assert all([v is not None for v in [self.text, self.role, self.tool_call_id]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n \"tool_call_id\": self.tool_call_id,\n }\n\n else:\n raise ValueError(self.role)\n\n return openai_message" }, { "identifier": "chat_completion_response", "path": "memgpt/models/chat_completion_response.py", "snippet": "class FunctionCall(BaseModel):\nclass ToolCall(BaseModel):\nclass LogProbToken(BaseModel):\nclass MessageContentLogProb(BaseModel):\nclass Message(BaseModel):\nclass Choice(BaseModel):\nclass UsageStatistics(BaseModel):\nclass ChatCompletionResponse(BaseModel):" }, { "identifier": "AgentInterface", "path": "memgpt/interface.py", "snippet": "class AgentInterface(ABC):\r\n \"\"\"Interfaces handle MemGPT-related events (observer pattern)\"\"\"\r\n\r\n @abstractmethod\r\n def user_message(self, msg):\r\n \"\"\"MemGPT receives a user message\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def internal_monologue(self, msg):\r\n \"\"\"MemGPT generates some internal monologue\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def assistant_message(self, msg):\r\n \"\"\"MemGPT uses send_message\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def function_message(self, msg):\r\n \"\"\"MemGPT calls a function\"\"\"\r\n raise NotImplementedError\r" }, { "identifier": "PersistenceManager", "path": "memgpt/persistence_manager.py", "snippet": "class PersistenceManager(ABC):\r\n @abstractmethod\r\n def trim_messages(self, num):\r\n pass\r\n\r\n @abstractmethod\r\n def prepend_to_messages(self, 
added_messages):\r\n pass\r\n\r\n @abstractmethod\r\n def append_to_messages(self, added_messages):\r\n pass\r\n\r\n @abstractmethod\r\n def swap_system_message(self, new_system_message):\r\n pass\r\n\r\n @abstractmethod\r\n def update_memory(self, new_memory):\r\n pass\r" }, { "identifier": "LocalStateManager", "path": "memgpt/persistence_manager.py", "snippet": "class LocalStateManager(PersistenceManager):\r\n \"\"\"In-memory state manager has nothing to manage, all agents are held in-memory\"\"\"\r\n\r\n recall_memory_cls = BaseRecallMemory\r\n archival_memory_cls = EmbeddingArchivalMemory\r\n\r\n def __init__(self, agent_state: AgentState):\r\n # Memory held in-state useful for debugging stateful versions\r\n self.memory = None\r\n self.messages = [] # current in-context messages\r\n # self.all_messages = [] # all messages seen in current session (needed if lazily synchronizing state with DB)\r\n self.archival_memory = EmbeddingArchivalMemory(agent_state)\r\n self.recall_memory = BaseRecallMemory(agent_state)\r\n self.agent_state = agent_state\r\n\r\n def save(self):\r\n \"\"\"Ensure storage connectors save data\"\"\"\r\n self.archival_memory.save()\r\n self.recall_memory.save()\r\n\r\n def init(self, agent):\r\n \"\"\"Connect persistent state manager to agent\"\"\"\r\n printd(f\"Initializing {self.__class__.__name__} with agent object\")\r\n # self.all_messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in agent.messages.copy()]\r\n self.messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in agent.messages.copy()]\r\n self.memory = agent.memory\r\n # printd(f\"{self.__class__.__name__}.all_messages.len = {len(self.all_messages)}\")\r\n printd(f\"{self.__class__.__name__}.messages.len = {len(self.messages)}\")\r\n\r\n '''\r\n def json_to_message(self, message_json) -> Message:\r\n \"\"\"Convert agent message JSON into Message object\"\"\"\r\n\r\n # get message\r\n if \"message\" in message_json:\r\n message = message_json[\"message\"]\r\n else:\r\n message = message_json\r\n\r\n # get timestamp\r\n if \"timestamp\" in message_json:\r\n timestamp = parse_formatted_time(message_json[\"timestamp\"])\r\n else:\r\n timestamp = get_local_time()\r\n\r\n # TODO: change this when we fully migrate to tool calls API\r\n if \"function_call\" in message:\r\n tool_calls = [\r\n ToolCall(\r\n id=message[\"tool_call_id\"],\r\n tool_call_type=\"function\",\r\n function={\r\n \"name\": message[\"function_call\"][\"name\"],\r\n \"arguments\": message[\"function_call\"][\"arguments\"],\r\n },\r\n )\r\n ]\r\n printd(f\"Saving tool calls {[vars(tc) for tc in tool_calls]}\")\r\n else:\r\n tool_calls = None\r\n\r\n # if message[\"role\"] == \"function\":\r\n # message[\"role\"] = \"tool\"\r\n\r\n return Message(\r\n user_id=self.agent_state.user_id,\r\n agent_id=self.agent_state.id,\r\n role=message[\"role\"],\r\n text=message[\"content\"],\r\n name=message[\"name\"] if \"name\" in message else None,\r\n model=self.agent_state.llm_config.model,\r\n created_at=timestamp,\r\n tool_calls=tool_calls,\r\n tool_call_id=message[\"tool_call_id\"] if \"tool_call_id\" in message else None,\r\n id=message[\"id\"] if \"id\" in message else None,\r\n )\r\n '''\r\n\r\n def trim_messages(self, num):\r\n # printd(f\"InMemoryStateManager.trim_messages\")\r\n self.messages = [self.messages[0]] + self.messages[num:]\r\n\r\n def prepend_to_messages(self, added_messages: List[Message]):\r\n # first tag with timestamps\r\n # added_messages = [{\"timestamp\": get_local_time(), \"message\": 
msg} for msg in added_messages]\r\n\r\n printd(f\"{self.__class__.__name__}.prepend_to_message\")\r\n self.messages = [self.messages[0]] + added_messages + self.messages[1:]\r\n\r\n # add to recall memory\r\n self.recall_memory.insert_many([m for m in added_messages])\r\n\r\n def append_to_messages(self, added_messages: List[Message]):\r\n # first tag with timestamps\r\n # added_messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in added_messages]\r\n\r\n printd(f\"{self.__class__.__name__}.append_to_messages\")\r\n self.messages = self.messages + added_messages\r\n\r\n # add to recall memory\r\n self.recall_memory.insert_many([m for m in added_messages])\r\n\r\n def swap_system_message(self, new_system_message: Message):\r\n # first tag with timestamps\r\n # new_system_message = {\"timestamp\": get_local_time(), \"message\": new_system_message}\r\n\r\n printd(f\"{self.__class__.__name__}.swap_system_message\")\r\n self.messages[0] = new_system_message\r\n\r\n # add to recall memory\r\n self.recall_memory.insert(new_system_message)\r\n\r\n def update_memory(self, new_memory):\r\n printd(f\"{self.__class__.__name__}.update_memory\")\r\n self.memory = new_memory\r" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise 
ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n \"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, \"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = 
MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": 
"get_login_event", "path": "memgpt/system.py", "snippet": "def get_login_event(last_login=\"Never (first login)\", include_location=False, location_name=\"San Francisco, CA, USA\"):\r\n # Package the message with time and location\r\n formatted_time = get_local_time()\r\n packaged_message = {\r\n \"type\": \"login\",\r\n \"last_login\": last_login,\r\n \"time\": formatted_time,\r\n }\r\n\r\n if include_location:\r\n packaged_message[\"location\"] = location_name\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "package_function_response", "path": "memgpt/system.py", "snippet": "def package_function_response(was_success, response_string, timestamp=None):\r\n formatted_time = get_local_time() if timestamp is None else timestamp\r\n packaged_message = {\r\n \"status\": \"OK\" if was_success else \"Failed\",\r\n \"message\": response_string,\r\n \"time\": formatted_time,\r\n }\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "package_summarize_message", "path": "memgpt/system.py", "snippet": "def package_summarize_message(summary, summary_length, hidden_message_count, total_message_count, timestamp=None):\r\n context_message = (\r\n f\"Note: prior messages ({hidden_message_count} of {total_message_count} total messages) have been hidden from view due to conversation memory constraints.\\n\"\r\n + f\"The following is a summary of the previous {summary_length} messages:\\n {summary}\"\r\n )\r\n\r\n formatted_time = get_local_time() if timestamp is None else timestamp\r\n packaged_message = {\r\n \"type\": \"system_alert\",\r\n \"message\": context_message,\r\n \"time\": formatted_time,\r\n }\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "get_initial_boot_messages", "path": "memgpt/system.py", "snippet": "def get_initial_boot_messages(version=\"startup\"):\r\n if version == \"startup\":\r\n initial_boot_message = INITIAL_BOOT_MESSAGE\r\n messages = [\r\n {\"role\": \"assistant\", \"content\": initial_boot_message},\r\n ]\r\n\r\n elif version == \"startup_with_send_message\":\r\n tool_call_id = str(uuid.uuid4())\r\n messages = [\r\n # first message includes both inner monologue and function call to send_message\r\n {\r\n \"role\": \"assistant\",\r\n \"content\": INITIAL_BOOT_MESSAGE_SEND_MESSAGE_THOUGHT,\r\n # \"function_call\": {\r\n # \"name\": \"send_message\",\r\n # \"arguments\": '{\\n \"message\": \"' + f\"{INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG}\" + '\"\\n}',\r\n # },\r\n \"tool_calls\": [\r\n {\r\n \"id\": tool_call_id,\r\n \"type\": \"function\",\r\n \"function\": {\r\n \"name\": \"send_message\",\r\n \"arguments\": '{\\n \"message\": \"' + f\"{INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG}\" + '\"\\n}',\r\n },\r\n }\r\n ],\r\n },\r\n # obligatory function return message\r\n {\r\n # \"role\": \"function\",\r\n \"role\": \"tool\",\r\n \"name\": \"send_message\", # NOTE: technically not up to spec, this is old functions style\r\n \"content\": package_function_response(True, None),\r\n \"tool_call_id\": tool_call_id,\r\n },\r\n ]\r\n\r\n elif version == \"startup_with_send_message_gpt35\":\r\n tool_call_id = str(uuid.uuid4())\r\n messages = [\r\n # first message includes both inner monologue and function call to send_message\r\n {\r\n \"role\": \"assistant\",\r\n \"content\": \"*inner thoughts* Still waiting on the user. 
Sending a message with function.\",\r\n # \"function_call\": {\"name\": \"send_message\", \"arguments\": '{\\n \"message\": \"' + f\"Hi, is anyone there?\" + '\"\\n}'},\r\n \"tool_calls\": [\r\n {\r\n \"id\": tool_call_id,\r\n \"type\": \"function\",\r\n \"function\": {\r\n \"name\": \"send_message\",\r\n \"arguments\": '{\\n \"message\": \"' + f\"Hi, is anyone there?\" + '\"\\n}',\r\n },\r\n }\r\n ],\r\n },\r\n # obligatory function return message\r\n {\r\n # \"role\": \"function\",\r\n \"role\": \"tool\",\r\n \"name\": \"send_message\",\r\n \"content\": package_function_response(True, None),\r\n \"tool_call_id\": tool_call_id,\r\n },\r\n ]\r\n\r\n else:\r\n raise ValueError(version)\r\n\r\n return messages\r" }, { "identifier": "CoreMemory", "path": "memgpt/memory.py", "snippet": "class CoreMemory(object):\r\n \"\"\"Held in-context inside the system message\r\n\r\n Core Memory: Refers to the system block, which provides essential, foundational context to the AI.\r\n This includes the persona information, essential user details,\r\n and any other baseline data you deem necessary for the AI's basic functioning.\r\n \"\"\"\r\n\r\n def __init__(self, persona=None, human=None, persona_char_limit=None, human_char_limit=None, archival_memory_exists=True):\r\n self.persona = persona\r\n self.human = human\r\n self.persona_char_limit = persona_char_limit\r\n self.human_char_limit = human_char_limit\r\n\r\n # affects the error message the AI will see on overflow inserts\r\n self.archival_memory_exists = archival_memory_exists\r\n\r\n def __repr__(self) -> str:\r\n return f\"\\n### CORE MEMORY ###\" + f\"\\n=== Persona ===\\n{self.persona}\" + f\"\\n\\n=== Human ===\\n{self.human}\"\r\n\r\n def to_dict(self):\r\n return {\r\n \"persona\": self.persona,\r\n \"human\": self.human,\r\n }\r\n\r\n @classmethod\r\n def load(cls, state):\r\n return cls(state[\"persona\"], state[\"human\"])\r\n\r\n def edit_persona(self, new_persona):\r\n if self.persona_char_limit and len(new_persona) > self.persona_char_limit:\r\n error_msg = f\"Edit failed: Exceeds {self.persona_char_limit} character limit (requested {len(new_persona)}).\"\r\n if self.archival_memory_exists:\r\n error_msg = f\"{error_msg} Consider summarizing existing core memories in 'persona' and/or moving lower priority content to archival memory to free up space in core memory, then trying again.\"\r\n raise ValueError(error_msg)\r\n\r\n self.persona = new_persona\r\n return len(self.persona)\r\n\r\n def edit_human(self, new_human):\r\n if self.human_char_limit and len(new_human) > self.human_char_limit:\r\n error_msg = f\"Edit failed: Exceeds {self.human_char_limit} character limit (requested {len(new_human)}).\"\r\n if self.archival_memory_exists:\r\n error_msg = f\"{error_msg} Consider summarizing existing core memories in 'human' and/or moving lower priority content to archival memory to free up space in core memory, then trying again.\"\r\n raise ValueError(error_msg)\r\n\r\n self.human = new_human\r\n return len(self.human)\r\n\r\n def edit(self, field, content):\r\n if field == \"persona\":\r\n return self.edit_persona(content)\r\n elif field == \"human\":\r\n return self.edit_human(content)\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r\n\r\n def edit_append(self, field, content, sep=\"\\n\"):\r\n if field == \"persona\":\r\n new_content = self.persona + sep + content\r\n return self.edit_persona(new_content)\r\n elif field == \"human\":\r\n new_content = self.human + sep + 
content\r\n return self.edit_human(new_content)\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r\n\r\n def edit_replace(self, field, old_content, new_content):\r\n if len(old_content) == 0:\r\n raise ValueError(\"old_content cannot be an empty string (must specify old_content to replace)\")\r\n\r\n if field == \"persona\":\r\n if old_content in self.persona:\r\n new_persona = self.persona.replace(old_content, new_content)\r\n return self.edit_persona(new_persona)\r\n else:\r\n raise ValueError(\"Content not found in persona (make sure to use exact string)\")\r\n elif field == \"human\":\r\n if old_content in self.human:\r\n new_human = self.human.replace(old_content, new_content)\r\n return self.edit_human(new_human)\r\n else:\r\n raise ValueError(\"Content not found in human (make sure to use exact string)\")\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r" }, { "identifier": "summarize_messages", "path": "memgpt/memory.py", "snippet": "def summarize_messages(\r\n agent_state: AgentState,\r\n message_sequence_to_summarize,\r\n):\r\n \"\"\"Summarize a message sequence using GPT\"\"\"\r\n # we need the context_window\r\n context_window = agent_state.llm_config.context_window\r\n\r\n summary_prompt = SUMMARY_PROMPT_SYSTEM\r\n summary_input = str(message_sequence_to_summarize)\r\n summary_input_tkns = count_tokens(summary_input)\r\n if summary_input_tkns > MESSAGE_SUMMARY_WARNING_FRAC * context_window:\r\n trunc_ratio = (MESSAGE_SUMMARY_WARNING_FRAC * context_window / summary_input_tkns) * 0.8 # For good measure...\r\n cutoff = int(len(message_sequence_to_summarize) * trunc_ratio)\r\n summary_input = str(\r\n [summarize_messages(agent_state, message_sequence_to_summarize=message_sequence_to_summarize[:cutoff])]\r\n + message_sequence_to_summarize[cutoff:]\r\n )\r\n message_sequence = [\r\n {\"role\": \"system\", \"content\": summary_prompt},\r\n {\"role\": \"user\", \"content\": summary_input},\r\n ]\r\n\r\n response = create(\r\n agent_state=agent_state,\r\n messages=message_sequence,\r\n )\r\n\r\n printd(f\"summarize_messages gpt reply: {response.choices[0]}\")\r\n reply = response.choices[0].message.content\r\n return reply\r" }, { "identifier": "create", "path": "memgpt/llm_api_tools.py", "snippet": "@retry_with_exponential_backoff\r\ndef create(\r\n agent_state: AgentState,\r\n messages,\r\n functions=None,\r\n functions_python=None,\r\n function_call=\"auto\",\r\n # hint\r\n first_message=False,\r\n # use tool naming?\r\n # if false, will use deprecated 'functions' style\r\n use_tool_naming=True,\r\n) -> ChatCompletionResponse:\r\n \"\"\"Return response to chat completion with backoff\"\"\"\r\n from memgpt.utils import printd\r\n\r\n printd(f\"Using model {agent_state.llm_config.model_endpoint_type}, endpoint: {agent_state.llm_config.model_endpoint}\")\r\n\r\n # TODO eventually refactor so that credentials are passed through\r\n credentials = MemGPTCredentials.load()\r\n\r\n # openai\r\n if agent_state.llm_config.model_endpoint_type == \"openai\":\r\n # TODO do the same for Azure?\r\n if credentials.openai_key is None:\r\n raise ValueError(f\"OpenAI key is missing from MemGPT config file\")\r\n if use_tool_naming:\r\n data = dict(\r\n model=agent_state.llm_config.model,\r\n messages=messages,\r\n tools=[{\"type\": \"function\", \"function\": f} for f in functions],\r\n tool_choice=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n else:\r\n data = dict(\r\n 
model=agent_state.llm_config.model,\r\n messages=messages,\r\n functions=functions,\r\n function_call=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n return openai_chat_completions_request(\r\n url=agent_state.llm_config.model_endpoint, # https://api.openai.com/v1 -> https://api.openai.com/v1/chat/completions\r\n api_key=credentials.openai_key,\r\n data=data,\r\n )\r\n\r\n # azure\r\n elif agent_state.llm_config.model_endpoint_type == \"azure\":\r\n azure_deployment = (\r\n credentials.azure_deployment\r\n if credentials.azure_deployment is not None\r\n else MODEL_TO_AZURE_ENGINE[agent_state.llm_config.model]\r\n )\r\n if use_tool_naming:\r\n data = dict(\r\n # NOTE: don't pass model to Azure calls, that is the deployment_id\r\n # model=agent_config.model,\r\n messages=messages,\r\n tools=[{\"type\": \"function\", \"function\": f} for f in functions],\r\n tool_choice=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n else:\r\n data = dict(\r\n # NOTE: don't pass model to Azure calls, that is the deployment_id\r\n # model=agent_config.model,\r\n messages=messages,\r\n functions=functions,\r\n function_call=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n return azure_openai_chat_completions_request(\r\n resource_name=credentials.azure_endpoint,\r\n deployment_id=azure_deployment,\r\n api_version=credentials.azure_version,\r\n api_key=credentials.azure_key,\r\n data=data,\r\n )\r\n\r\n # local model\r\n else:\r\n return get_chat_completion(\r\n model=agent_state.llm_config.model,\r\n messages=messages,\r\n functions=functions,\r\n functions_python=functions_python,\r\n function_call=function_call,\r\n context_window=agent_state.llm_config.context_window,\r\n endpoint=agent_state.llm_config.model_endpoint,\r\n endpoint_type=agent_state.llm_config.model_endpoint_type,\r\n wrapper=agent_state.llm_config.model_wrapper,\r\n user=str(agent_state.user_id),\r\n # hint\r\n first_message=first_message,\r\n # auth-related\r\n auth_type=credentials.openllm_auth_type,\r\n auth_key=credentials.openllm_key,\r\n )\r" }, { "identifier": "is_context_overflow_error", "path": "memgpt/llm_api_tools.py", "snippet": "def is_context_overflow_error(exception):\r\n from memgpt.utils import printd\r\n\r\n match_string = \"maximum context length\"\r\n\r\n # Backwards compatability with openai python package/client v0.28 (pre-v1 client migration)\r\n if match_string in str(exception):\r\n printd(f\"Found '{match_string}' in str(exception)={(str(exception))}\")\r\n return True\r\n\r\n # Based on python requests + OpenAI REST API (/v1)\r\n elif isinstance(exception, requests.exceptions.HTTPError):\r\n if exception.response is not None and \"application/json\" in exception.response.headers.get(\"Content-Type\", \"\"):\r\n try:\r\n error_details = exception.response.json()\r\n if \"error\" not in error_details:\r\n printd(f\"HTTPError occured, but couldn't find error field: {error_details}\")\r\n return False\r\n else:\r\n error_details = error_details[\"error\"]\r\n\r\n # Check for the specific error code\r\n if error_details.get(\"code\") == \"context_length_exceeded\":\r\n printd(f\"HTTPError occured, caught error code {error_details.get('code')}\")\r\n return True\r\n # Soft-check for \"maximum context length\" inside of the message\r\n elif error_details.get(\"message\") and \"maximum context length\" in error_details.get(\"message\"):\r\n printd(f\"HTTPError occured, found '{match_string}' in error message contents ({error_details})\")\r\n return True\r\n else:\r\n printd(f\"HTTPError 
occured, but unknown error message: {error_details}\")\r\n return False\r\n except ValueError:\r\n # JSON decoding failed\r\n printd(f\"HTTPError occurred ({exception}), but no JSON error message.\")\r\n\r\n # Generic fail\r\n else:\r\n return False\r" }, { "identifier": "get_tool_call_id", "path": "memgpt/utils.py", "snippet": "def get_tool_call_id() -> str:\r\n return str(uuid.uuid4())\r" }, { "identifier": "get_local_time", "path": "memgpt/utils.py", "snippet": "def get_local_time(timezone=None):\r\n if timezone is not None:\r\n time_str = get_local_time_timezone(timezone)\r\n else:\r\n # Get the current time, which will be in the local timezone of the computer\r\n local_time = datetime.now().astimezone()\r\n\r\n # You may format it as you desire, including AM/PM\r\n time_str = local_time.strftime(\"%Y-%m-%d %I:%M:%S %p %Z%z\")\r\n\r\n return time_str.strip()\r" }, { "identifier": "parse_json", "path": "memgpt/utils.py", "snippet": "def parse_json(string):\r\n \"\"\"Parse JSON string into JSON with both json and demjson\"\"\"\r\n result = None\r\n try:\r\n result = json.loads(string)\r\n return result\r\n except Exception as e:\r\n print(f\"Error parsing json with json package: {e}\")\r\n\r\n try:\r\n result = demjson.decode(string)\r\n return result\r\n except demjson.JSONDecodeError as e:\r\n print(f\"Error parsing json with demjson package: {e}\")\r\n raise e\r" }, { "identifier": "united_diff", "path": "memgpt/utils.py", "snippet": "def united_diff(str1, str2):\r\n lines1 = str1.splitlines(True)\r\n lines2 = str2.splitlines(True)\r\n diff = difflib.unified_diff(lines1, lines2)\r\n return \"\".join(diff)\r" }, { "identifier": "printd", "path": "memgpt/utils.py", "snippet": "def printd(*args, **kwargs):\r\n if DEBUG:\r\n print(*args, **kwargs)\r" }, { "identifier": "count_tokens", "path": "memgpt/utils.py", "snippet": "def count_tokens(s: str, model: str = \"gpt-4\") -> int:\r\n encoding = tiktoken.encoding_for_model(model)\r\n return len(encoding.encode(s))\r" }, { "identifier": "get_schema_diff", "path": "memgpt/utils.py", "snippet": "def get_schema_diff(schema_a, schema_b):\r\n # Assuming f_schema and linked_function['json_schema'] are your JSON schemas\r\n f_schema_json = json.dumps(schema_a, indent=2, ensure_ascii=JSON_ENSURE_ASCII)\r\n linked_function_json = json.dumps(schema_b, indent=2, ensure_ascii=JSON_ENSURE_ASCII)\r\n\r\n # Compute the difference using difflib\r\n difference = list(difflib.ndiff(f_schema_json.splitlines(keepends=True), linked_function_json.splitlines(keepends=True)))\r\n\r\n # Filter out lines that don't represent changes\r\n difference = [line for line in difference if line.startswith(\"+ \") or line.startswith(\"- \")]\r\n\r\n return \"\".join(difference)\r" }, { "identifier": "validate_function_response", "path": "memgpt/utils.py", "snippet": "def validate_function_response(function_response_string: any, strict: bool = False, truncate: bool = True) -> str:\r\n \"\"\"Check to make sure that a function used by MemGPT returned a valid response\r\n\r\n Responses need to be strings (or None) that fall under a certain text count limit.\r\n \"\"\"\r\n if not isinstance(function_response_string, str):\r\n # Soft correction for a few basic types\r\n\r\n if function_response_string is None:\r\n # function_response_string = \"Empty (no function output)\"\r\n function_response_string = \"None\" # backcompat\r\n\r\n elif isinstance(function_response_string, dict):\r\n if strict:\r\n # TODO add better error message\r\n raise 
ValueError(function_response_string)\r\n\r\n # Allow dict through since it will be cast to json.dumps()\r\n try:\r\n # TODO find a better way to do this that won't result in double escapes\r\n function_response_string = json.dumps(function_response_string, ensure_ascii=JSON_ENSURE_ASCII)\r\n except:\r\n raise ValueError(function_response_string)\r\n\r\n else:\r\n if strict:\r\n # TODO add better error message\r\n raise ValueError(function_response_string)\r\n\r\n # Try to convert to a string, but throw a warning to alert the user\r\n try:\r\n function_response_string = str(function_response_string)\r\n except:\r\n raise ValueError(function_response_string)\r\n\r\n # Now check the length and make sure it doesn't go over the limit\r\n # TODO we should change this to a max token limit that's variable based on tokens remaining (or context-window)\r\n if truncate and len(function_response_string) > FUNCTION_RETURN_CHAR_LIMIT:\r\n print(\r\n f\"{CLI_WARNING_PREFIX}function return was over limit ({len(function_response_string)} > {FUNCTION_RETURN_CHAR_LIMIT}) and was truncated\"\r\n )\r\n function_response_string = f\"{function_response_string[:FUNCTION_RETURN_CHAR_LIMIT]}... [NOTE: function output was truncated since it exceeded the character limit ({len(function_response_string)} > {FUNCTION_RETURN_CHAR_LIMIT})]\"\r\n\r\n return function_response_string\r" }, { "identifier": "verify_first_message_correctness", "path": "memgpt/utils.py", "snippet": "def verify_first_message_correctness(\r\n response: ChatCompletionResponse, require_send_message: bool = True, require_monologue: bool = False\r\n) -> bool:\r\n \"\"\"Can be used to enforce that the first message always uses send_message\"\"\"\r\n response_message = response.choices[0].message\r\n\r\n # First message should be a call to send_message with a non-empty content\r\n if require_send_message and not (response_message.function_call or response_message.tool_calls):\r\n printd(f\"First message didn't include function call: {response_message}\")\r\n return False\r\n\r\n assert not (response_message.function_call and response_message.tool_calls), response_message\r\n function_call = response_message.function_call if response_message.function_call else response_message.tool_calls[0].function\r\n function_name = function_call.name if function_call is not None else \"\"\r\n if require_send_message and function_name != \"send_message\" and function_name != \"archival_memory_search\":\r\n printd(f\"First message function call wasn't send_message or archival_memory_search: {response_message}\")\r\n return False\r\n\r\n if require_monologue and (not response_message.content or response_message.content is None or response_message.content == \"\"):\r\n printd(f\"First message missing internal monologue: {response_message}\")\r\n return False\r\n\r\n if response_message.content:\r\n ### Extras\r\n monologue = response_message.content\r\n\r\n def contains_special_characters(s):\r\n special_characters = '(){}[]\"'\r\n return any(char in s for char in special_characters)\r\n\r\n if contains_special_characters(monologue):\r\n printd(f\"First message internal monologue contained special characters: {response_message}\")\r\n return False\r\n # if 'functions' in monologue or 'send_message' in monologue or 'inner thought' in monologue.lower():\r\n if \"functions\" in monologue or \"send_message\" in monologue:\r\n # Sometimes the syntax won't be correct and internal syntax will leak into message.context\r\n printd(f\"First message internal monologue contained 
reserved words: {response_message}\")\r\n return False\r\n\r\n return True\r" }, { "identifier": "FIRST_MESSAGE_ATTEMPTS", "path": "memgpt/constants.py", "snippet": "FIRST_MESSAGE_ATTEMPTS = 10\r" }, { "identifier": "MESSAGE_SUMMARY_WARNING_FRAC", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_WARNING_FRAC = 0.75\r" }, { "identifier": "MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC = 0.75\r" }, { "identifier": "MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST = 3\r" }, { "identifier": "CORE_MEMORY_HUMAN_CHAR_LIMIT", "path": "memgpt/constants.py", "snippet": "CORE_MEMORY_HUMAN_CHAR_LIMIT = 2000\r" }, { "identifier": "CORE_MEMORY_PERSONA_CHAR_LIMIT", "path": "memgpt/constants.py", "snippet": "CORE_MEMORY_PERSONA_CHAR_LIMIT = 2000\r" }, { "identifier": "LLM_MAX_TOKENS", "path": "memgpt/constants.py", "snippet": "LLM_MAX_TOKENS = {\r\n \"DEFAULT\": 8192,\r\n ## OpenAI models: https://platform.openai.com/docs/models/overview\r\n # gpt-4\r\n \"gpt-4-1106-preview\": 128000,\r\n \"gpt-4\": 8192,\r\n \"gpt-4-32k\": 32768,\r\n \"gpt-4-0613\": 8192,\r\n \"gpt-4-32k-0613\": 32768,\r\n \"gpt-4-0314\": 8192, # legacy\r\n \"gpt-4-32k-0314\": 32768, # legacy\r\n # gpt-3.5\r\n \"gpt-3.5-turbo-1106\": 16385,\r\n \"gpt-3.5-turbo\": 4096,\r\n \"gpt-3.5-turbo-16k\": 16385,\r\n \"gpt-3.5-turbo-0613\": 4096, # legacy\r\n \"gpt-3.5-turbo-16k-0613\": 16385, # legacy\r\n \"gpt-3.5-turbo-0301\": 4096, # legacy\r\n}\r" }, { "identifier": "CLI_WARNING_PREFIX", "path": "memgpt/constants.py", "snippet": "CLI_WARNING_PREFIX = \"Warning: \"\r" }, { "identifier": "JSON_ENSURE_ASCII", "path": "memgpt/constants.py", "snippet": "JSON_ENSURE_ASCII = False\r" }, { "identifier": "LLMError", "path": "memgpt/errors.py", "snippet": "class LLMError(Exception):\n \"\"\"Base class for all LLM-related errors.\"\"\"\n\n pass" }, { "identifier": "USER_FUNCTIONS_DIR", "path": "memgpt/functions/functions.py", "snippet": "USER_FUNCTIONS_DIR = os.path.join(MEMGPT_DIR, \"functions\")" }, { "identifier": "load_all_function_sets", "path": "memgpt/functions/functions.py", "snippet": "def load_all_function_sets(merge=True):\n # functions/examples/*.py\n scripts_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script\n function_sets_dir = os.path.join(scripts_dir, \"function_sets\") # Path to the function_sets directory\n # List all .py files in the directory (excluding __init__.py)\n example_module_files = [f for f in os.listdir(function_sets_dir) if f.endswith(\".py\") and f != \"__init__.py\"]\n\n # ~/.memgpt/functions/*.py\n # create if missing\n if not os.path.exists(USER_FUNCTIONS_DIR):\n os.makedirs(USER_FUNCTIONS_DIR)\n user_module_files = [f for f in os.listdir(USER_FUNCTIONS_DIR) if f.endswith(\".py\") and f != \"__init__.py\"]\n\n # combine them both (pull from both examples and user-provided)\n # all_module_files = example_module_files + user_module_files\n\n # Add user_scripts_dir to sys.path\n if USER_FUNCTIONS_DIR not in sys.path:\n sys.path.append(USER_FUNCTIONS_DIR)\n\n schemas_and_functions = {}\n for dir_path, module_files in [(function_sets_dir, example_module_files), (USER_FUNCTIONS_DIR, user_module_files)]:\n for file in module_files:\n module_name = file[:-3] # Remove '.py' from filename\n if dir_path == USER_FUNCTIONS_DIR:\n # For user scripts, adjust the module name appropriately\n module_full_path = os.path.join(dir_path, file)\n try:\n spec = 
importlib.util.spec_from_file_location(module_name, module_full_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n except ModuleNotFoundError as e:\n # Handle missing module imports\n missing_package = str(e).split(\"'\")[1] # Extract the name of the missing package\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{module_full_path}'!\")\n print(\n f\"'{file}' imports '{missing_package}', but '{missing_package}' is not installed locally - install python package '{missing_package}' to link functions from '{file}' to MemGPT.\"\n )\n continue\n except SyntaxError as e:\n # Handle syntax errors in the module\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{file}' due to a syntax error: {e}\")\n continue\n except Exception as e:\n # Handle other general exceptions\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{file}': {e}\")\n continue\n else:\n # For built-in scripts, use the existing method\n full_module_name = f\"memgpt.functions.function_sets.{module_name}\"\n try:\n module = importlib.import_module(full_module_name)\n except Exception as e:\n # Handle other general exceptions\n print(f\"{CLI_WARNING_PREFIX}skipped loading python module '{full_module_name}': {e}\")\n continue\n\n try:\n # Load the function set\n function_set = load_function_set(module)\n schemas_and_functions[module_name] = function_set\n except ValueError as e:\n print(f\"Error loading function set '{module_name}': {e}\")\n\n if merge:\n # Put all functions from all sets into the same level dict\n merged_functions = {}\n for set_name, function_set in schemas_and_functions.items():\n for function_name, function_info in function_set.items():\n if function_name in merged_functions:\n raise ValueError(f\"Duplicate function name '{function_name}' found in function set '{set_name}'\")\n merged_functions[function_name] = function_info\n return merged_functions\n else:\n # Nested dict where the top level is organized by the function set name\n return schemas_and_functions" } ]
import datetime import uuid import glob import inspect import os import json import traceback from pathlib import Path from typing import List, Tuple from box import Box from memgpt.data_types import AgentState, Message from memgpt.models import chat_completion_response from memgpt.interface import AgentInterface from memgpt.persistence_manager import PersistenceManager, LocalStateManager from memgpt.config import MemGPTConfig from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages from memgpt.memory import CoreMemory as InContextMemory, summarize_messages from memgpt.llm_api_tools import create, is_context_overflow_error from memgpt.utils import ( get_tool_call_id, get_local_time, parse_json, united_diff, printd, count_tokens, get_schema_diff, validate_function_response, verify_first_message_correctness, ) from memgpt.constants import ( FIRST_MESSAGE_ATTEMPTS, MESSAGE_SUMMARY_WARNING_FRAC, MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC, MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST, CORE_MEMORY_HUMAN_CHAR_LIMIT, CORE_MEMORY_PERSONA_CHAR_LIMIT, LLM_MAX_TOKENS, CLI_WARNING_PREFIX, JSON_ENSURE_ASCII, ) from .errors import LLMError from .functions.functions import USER_FUNCTIONS_DIR, load_all_function_sets
16,095
self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. 
enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default
def link_functions(function_schemas): """Link function definitions to list of function schemas""" # need to dynamically link the functions # the saved agent.functions will just have the schemas, but we need to # go through the functions library and pull the respective python functions # Available functions is a mapping from: # function_name -> { # json_schema: schema # python_function: function # } # agent.functions is a list of schemas (OpenAI kwarg functions style, see: https://platform.openai.com/docs/api-reference/chat/create) # [{'name': ..., 'description': ...}, {...}] available_functions = load_all_function_sets() linked_function_set = {} for f_schema in function_schemas: # Attempt to find the function in the existing function library f_name = f_schema.get("name") if f_name is None: raise ValueError(f"While loading agent.state.functions encountered a bad function schema object with no name:\n{f_schema}") linked_function = available_functions.get(f_name) if linked_function is None: raise ValueError( f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}" ) # Once we find a matching function, make sure the schema is identical if json.dumps(f_schema, ensure_ascii=JSON_ENSURE_ASCII) != json.dumps( linked_function["json_schema"], ensure_ascii=JSON_ENSURE_ASCII ): # error_message = ( # f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different." # + f"\n>>>agent.state.functions\n{json.dumps(f_schema, indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # + f"\n>>>function library\n{json.dumps(linked_function['json_schema'], indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # ) schema_diff = get_schema_diff(f_schema, linked_function["json_schema"]) error_message = ( f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different.\n" + "".join(schema_diff) ) # NOTE to handle old configs, instead of erroring here let's just warn # raise ValueError(error_message) printd(error_message) linked_function_set[f_name] = linked_function return linked_function_set def initialize_memory(ai_notes, human_notes): if ai_notes is None: raise ValueError(ai_notes) if human_notes is None: raise ValueError(human_notes) memory = InContextMemory(human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT, persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT) memory.edit_persona(ai_notes) memory.edit_human(human_notes) return memory def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True): full_system_message = "\n".join( [ system, "\n", f"### Memory [last modified: {memory_edit_timestamp.strip()}]", f"{len(recall_memory) if recall_memory else 0} previous messages between you and the user are stored in recall memory (use functions to access them)", f"{len(archival_memory) if archival_memory else 0} total memories you created are stored in archival memory (use functions to access them)", "\nCore memory shown below (limited in size, additional information stored in archival / recall memory):", f'<persona characters="{len(memory.persona)}/{memory.persona_char_limit}">' if include_char_count else "<persona>", memory.persona, "</persona>", f'<human characters="{len(memory.human)}/{memory.human_char_limit}">' if include_char_count else "<human>", memory.human, "</human>", ] ) return full_system_message def initialize_message_sequence( model, system, memory, archival_memory=None, 
recall_memory=None, memory_edit_timestamp=None, include_initial_boot_message=True, ): if memory_edit_timestamp is None: memory_edit_timestamp = get_local_time() full_system_message = construct_system_with_memory( system, memory, memory_edit_timestamp, archival_memory=archival_memory, recall_memory=recall_memory ) first_user_message = get_login_event() # event letting MemGPT know the user just logged in if include_initial_boot_message: if model is not None and "gpt-3.5" in model: initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35") else: initial_boot_messages = get_initial_boot_messages("startup_with_send_message") messages = ( [ {"role": "system", "content": full_system_message}, ] + initial_boot_messages + [ {"role": "user", "content": first_user_message}, ] ) else: messages = [ {"role": "system", "content": full_system_message}, {"role": "user", "content": first_user_message}, ] return messages class Agent(object): def __init__( self, agent_state: AgentState, interface: AgentInterface, # extras messages_total=None, # TODO remove? first_message_verify_mono=True, # TODO move to config? memgpt_config: MemGPTConfig = None, ): # Hold a copy of the state that was used to init the agent self.agent_state = agent_state # gpt-4, gpt-3.5-turbo, ... self.model = agent_state.llm_config.model # Store the system instructions (used to rebuild memory) if "system" not in agent_state.state: raise ValueError(f"'system' not found in provided AgentState") self.system = agent_state.state["system"] if "functions" not in agent_state.state: raise ValueError(f"'functions' not found in provided AgentState") # Store the functions schemas (this is passed as an argument to ChatCompletion) self.functions = agent_state.state["functions"] # these are the schema # Link the actual python functions corresponding to the schemas self.functions_python = {k: v["python_function"] for k, v in link_functions(function_schemas=self.functions).items()} assert all([callable(f) for k, f in self.functions_python.items()]), self.functions_python # Initialize the memory object if "persona" not in agent_state.state: raise ValueError(f"'persona' not found in provided AgentState") if "human" not in agent_state.state: raise ValueError(f"'human' not found in provided AgentState") self.memory = initialize_memory(ai_notes=agent_state.state["persona"], human_notes=agent_state.state["human"]) # Interface must implement: # - internal_monologue # - assistant_message # - function_message # ... 
# Different interfaces can handle events differently # e.g., print in CLI vs send a discord message with a discord bot self.interface = interface # Create the persistence manager object based on the AgentState info # TODO self.persistence_manager = LocalStateManager(agent_state=agent_state) # State needed for heartbeat pausing self.pause_heartbeats_start = None self.pause_heartbeats_minutes = 0 self.first_message_verify_mono = first_message_verify_mono # Controls if the convo memory pressure warning is triggered # When an alert is sent in the message queue, set this to True (to avoid repeat alerts) # When the summarizer is run, set this back to False (to reset) self.agent_alerted_about_memory_pressure = False # Read local config if not provided if not memgpt_config: self.memgpt_config = MemGPTConfig() else: self.memgpt_config = memgpt_config # Initialize connection to metedata store # self.ms = MetadataStore(self.memgpt_config) # Once the memory object is initialized, use it to "bake" the system message if "messages" in agent_state.state and agent_state.state["messages"] is not None: # print(f"Agent.__init__ :: loading, state={agent_state.state['messages']}") if not isinstance(agent_state.state["messages"], list): raise ValueError(f"'messages' in AgentState was bad type: {type(agent_state.state['messages'])}") assert all([isinstance(msg, str) for msg in agent_state.state["messages"]]) # Convert to IDs, and pull from the database self._messages = [ self.persistence_manager.recall_memory.storage.get(uuid.UUID(msg_id)) for msg_id in agent_state.state["messages"] ] assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, agent_state.state["messages"]) else: # print(f"Agent.__init__ :: creating, state={agent_state.state['messages']}") init_messages = initialize_message_sequence( self.model, self.system, self.memory, ) init_messages_objs = [] for msg in init_messages: init_messages_objs.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=msg ) ) self._messages = [] self.messages_total = 0 self._append_to_messages(added_messages=init_messages_objs) assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, init_messages) # Keep track of the total number of messages throughout all time self.messages_total = messages_total if messages_total is not None else (len(self._messages) - 1) # (-system) # self.messages_total_init = self.messages_total self.messages_total_init = len(self._messages) - 1 printd(f"Agent initialized, self.messages_total={self.messages_total}") # Create the agent in the DB # self.save() self.update_state() @property def messages(self) -> List[dict]: """Getter method that converts the internal Message list into OpenAI-style dicts""" return [msg.to_openai_dict() for msg in self._messages] @messages.setter def messages(self, value): raise Exception("Modifying message list directly not allowed") def _trim_messages(self, num): """Trim messages from the front, not including the system message""" self.persistence_manager.trim_messages(num) new_messages = [self.messages[0]] + self.messages[num:] self._messages = new_messages def _prepend_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.prepend to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.prepend_to_messages(added_messages) new_messages = [self.messages[0]] + 
added_messages + self.messages[1:] # prepend (no system) self._messages = new_messages self.messages_total += len(added_messages) # still should increment the message counter (summaries are additions too) def _append_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.append to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.append_to_messages(added_messages) # strip extra metadata if it exists # for msg in added_messages: # msg.pop("api_response", None) # msg.pop("api_args", None) new_messages = self._messages + added_messages # append self._messages = new_messages self.messages_total += len(added_messages) def _swap_system_message(self, new_system_message: Message): assert isinstance(new_system_message, Message) assert new_system_message.role == "system", new_system_message assert self._messages[0].role == "system", self._messages self.persistence_manager.swap_system_message(new_system_message) new_messages = [new_system_message] + self._messages[1:] # swap index 0 (system) self._messages = new_messages def _get_ai_reply( self, message_sequence: List[dict], function_call: str = "auto", first_message: bool = False, # hint ) -> chat_completion_response.ChatCompletionResponse: """Get response from LLM API""" try: response = create( agent_state=self.agent_state, messages=message_sequence, functions=self.functions, functions_python=self.functions_python, function_call=function_call, # hint first_message=first_message, ) # special case for 'length' if response.choices[0].finish_reason == "length": raise Exception("Finish reason was length (maximum context length)") # catches for soft errors if response.choices[0].finish_reason not in ["stop", "function_call", "tool_calls"]: raise Exception(f"API call finish with bad finish reason: {response}") # unpack with response.choices[0].message.content return response except Exception as e: raise e def _handle_ai_response( self, response_message: chat_completion_response.Message, override_tool_call_id: bool = True ) -> Tuple[List[Message], bool, bool]: """Handles parsing and function execution""" messages = [] # append these to the history when done # Step 2: check if LLM wanted to call a function if response_message.function_call or (response_message.tool_calls is not None and len(response_message.tool_calls) > 0): if response_message.function_call: raise DeprecationWarning(response_message) if response_message.tool_calls is not None and len(response_message.tool_calls) > 1: raise NotImplementedError(f">1 tool call not supported") # The content if then internal monologue, not chat self.interface.internal_monologue(response_message.content) # generate UUID for tool call if override_tool_call_id or response_message.function_call: tool_call_id = get_tool_call_id() # needs to be a string for JSON response_message.tool_calls[0].id = tool_call_id else: tool_call_id = response_message.tool_calls[0].id assert tool_call_id is not None # should be defined # only necessary to add the tool_cal_id to a function call (antipattern) # response_message_dict = response_message.model_dump() # response_message_dict["tool_call_id"] = tool_call_id # role: assistant (requesting tool call, set tool call ID) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply printd(f"Function call 
message: {messages[-1]}") # Step 3: call the function # Note: the JSON response may not always be valid; be sure to handle errors # Failure case 1: function name is wrong function_call = ( response_message.function_call if response_message.function_call is not None else response_message.tool_calls[0].function ) function_name = function_call.name printd(f"Request to call function {function_name} with tool_call_id: {tool_call_id}") try: function_to_call = self.functions_python[function_name] except KeyError as e: error_msg = f"No function named {function_name}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # Failure case 2: function name is OK, but function args are bad JSON try: raw_function_args = function_call.arguments function_args = parse_json(raw_function_args) except Exception as e: error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # (Still parsing function args) # Handle requests for immediate heartbeat heartbeat_request = function_args.pop("request_heartbeat", None) if not (isinstance(heartbeat_request, bool) or heartbeat_request is None): printd( f"{CLI_WARNING_PREFIX}'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}" ) heartbeat_request = None # Failure case 3: function failed during execution self.interface.function_message(f"Running {function_name}({function_args})") try: spec = inspect.getfullargspec(function_to_call).annotations for name, arg in function_args.items(): if isinstance(function_args[name], dict): function_args[name] = spec[name](**function_args[name]) function_args["self"] = self # need to attach self to arg since it's dynamically linked function_response = function_to_call(**function_args) if function_name in ["conversation_search", "conversation_search_date", "archival_memory_search"]: # with certain functions we rely on the paging mechanism to handle overflow truncate = False else: # but by default, we add a truncation safeguard to prevent bad functions from # overflow the agent context window truncate = True function_response_string = validate_function_response(function_response, truncate=truncate) function_args.pop("self", None) function_response = package_function_response(True, function_response_string) function_failed = False except Exception as e: function_args.pop("self", None) # error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}" # Less detailed - don't provide full args, idea is that it should be in recent context so no need (just adds noise) error_msg = f"Error calling function 
{function_name}: {str(e)}" error_msg_user = f"{error_msg}\n{traceback.format_exc()}" printd(error_msg_user) function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. 
Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default
print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
30
2023-10-11 07:38:37+00:00
24k
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "save_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def save_checkpoint(work_dir,\n epoch,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n keep_last=False,\n step=None,\n ):\n os.makedirs(work_dir, exist_ok=True)\n state_dict = dict(state_dict=model.state_dict())\n if model_ema is not None:\n state_dict['state_dict_ema'] = model_ema.state_dict()\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['scheduler'] = lr_scheduler.state_dict()\n if epoch is not None:\n state_dict['epoch'] = epoch\n file_path = os.path.join(work_dir, f\"epoch_{epoch}.pth\")\n if step is not None:\n file_path = file_path.split('.pth')[0] + f\"_step_{step}.pth\"\n logger = get_root_logger()\n torch.save(state_dict, file_path)\n logger.info(f'Saved checkpoint of epoch {epoch} to {file_path.format(epoch)}.')\n if keep_last:\n for i in range(epoch):\n previous_ckgt = file_path.format(i)\n if os.path.exists(previous_ckgt):\n os.remove(previous_ckgt)" }, { "identifier": "load_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def load_checkpoint(checkpoint,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n load_ema=False,\n resume_optimizer=True,\n resume_lr_scheduler=True\n ):\n assert isinstance(checkpoint, str)\n ckpt_file = checkpoint\n checkpoint = torch.load(ckpt_file, map_location=\"cpu\")\n\n state_dict_keys = ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']\n for key in state_dict_keys:\n if key in checkpoint['state_dict']:\n del checkpoint['state_dict'][key]\n if 'state_dict_ema' in checkpoint and key in checkpoint['state_dict_ema']:\n del checkpoint['state_dict_ema'][key]\n break\n\n if load_ema:\n state_dict = checkpoint['state_dict_ema']\n else:\n state_dict = checkpoint.get('state_dict', checkpoint) # to be compatible with the official checkpoint\n # model.load_state_dict(state_dict)\n missing, unexpect = model.load_state_dict(state_dict, strict=False)\n if model_ema is not None:\n model_ema.load_state_dict(checkpoint['state_dict_ema'], strict=False)\n if optimizer is not None and resume_optimizer:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None and resume_lr_scheduler:\n lr_scheduler.load_state_dict(checkpoint['scheduler'])\n logger 
= get_root_logger()\n if optimizer is not None:\n epoch = checkpoint.get('epoch', re.match(r'.*epoch_(\\d*).*.pth', ckpt_file).group()[0])\n logger.info(f'Resume checkpoint of epoch {epoch} from {ckpt_file}. Load ema: {load_ema}, '\n f'resume optimizer: {resume_optimizer}, resume lr scheduler: {resume_lr_scheduler}.')\n return epoch, missing, unexpect\n logger.info(f'Load checkpoint from {ckpt_file}. Load ema: {load_ema}.')\n return missing, unexpect" }, { "identifier": "synchronize", "path": "diffusion/utils/dist_utils.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. 
time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "build_model", "path": "diffusion/model/builder.py", "snippet": "def build_model(cfg, use_grad_checkpoint=False, use_fp32_attention=False, gc_step=1, **kwargs):\n if isinstance(cfg, str):\n cfg = dict(type=cfg)\n model = MODELS.build(cfg, default_args=kwargs)\n if use_grad_checkpoint:\n set_grad_checkpoint(model, use_fp32_attention=use_fp32_attention, gc_step=gc_step)\n return model" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n 
self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == 
self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" }, { "identifier": "LCMScheduler", "path": "diffusion/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. 
Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = 
(beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0\n sample = torch.clamp(sample, -s, s) / s # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // lcm_origin_steps\n lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data ** 2 / ((t / 0.1) ** 2 + self.sigma_data ** 2)\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data ** 2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n 
generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" } ]
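The LCMScheduler.set_timesteps snippet above derives the inference schedule by linearly spacing the LCM training timesteps and then sub-sampling them in reverse. A standalone sketch of that computation, using illustrative values (1000 training timesteps, 50 LCM origin steps, 4 inference steps):

import numpy as np

def lcm_inference_timesteps(num_train_timesteps=1000, lcm_origin_steps=50, num_inference_steps=4):
    # Linear spacing over the LCM training schedule, as in LCMScheduler.set_timesteps
    c = num_train_timesteps // lcm_origin_steps
    lcm_origin_timesteps = np.arange(1, lcm_origin_steps + 1) * c - 1  # [19, 39, ..., 999]
    skipping_step = len(lcm_origin_timesteps) // num_inference_steps
    # Walk backwards from the largest timestep, keeping num_inference_steps entries
    return lcm_origin_timesteps[::-skipping_step][:num_inference_steps]

print(lcm_inference_timesteps())  # -> [999 759 519 279]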
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from diffusers.models import AutoencoderKL from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from copy import deepcopy from tqdm import tqdm from diffusion import IDDPM from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.lcm_scheduler import LCMScheduler from torchvision.utils import save_image from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,474
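The training code below forms its predictions as model_pred = c_skip * x_t + c_out * x0_hat, using the same boundary-condition scalings as get_scalings_for_boundary_condition_discrete in the scheduler snippet above (sigma_data = 0.5, timestep divided by 0.1). A minimal sketch of that formula:

def scalings_for_boundary_conditions(timestep, sigma_data=0.5):
    # c_skip -> 1 and c_out -> 0 as timestep -> 0, so the consistency model
    # reduces to the identity mapping at the boundary t = 0.
    scaled_t = timestep / 0.1
    c_skip = sigma_data ** 2 / (scaled_t ** 2 + sigma_data ** 2)
    c_out = scaled_t / (scaled_t ** 2 + sigma_data ** 2) ** 0.5
    return c_skip, c_out

print(scalings_for_boundary_conditions(0))    # (1.0, 0.0): prediction equals the input at t = 0
print(scalings_for_boundary_conditions(999))  # c_skip ~ 2.5e-9, c_out ~ 1.0: prediction is essentially x0_hat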
w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + 
start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args()
current_file_path = Path(__file__).resolve()
sys.path.insert(0, str(current_file_path.parent.parent))
warnings.filterwarnings("ignore")  # ignore warning


def set_fsdp_env():
    os.environ["ACCELERATE_USE_FSDP"] = 'true'
    os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP'
    os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE'
    os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock'


def ema_update(model_dest: nn.Module, model_src: nn.Module, rate):
    param_dict_src = dict(model_src.named_parameters())
    for p_name, p_dest in model_dest.named_parameters():
        p_src = param_dict_src[p_name]
        assert p_src is not p_dest
        p_dest.data.mul_(rate).add_((1 - rate) * p_src.data)


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    return x[(...,) + (None,) * dims_to_append]


# From LCMScheduler.get_scalings_for_boundary_condition_discrete
def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2)
    c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out


def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


class DDIMSolver:
    def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50):
        # DDIM sampling parameters
        step_ratio = timesteps // ddim_timesteps
        self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1
        self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps]
        self.ddim_alpha_cumprods_prev = np.asarray(
            [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist()
        )
        # convert to torch tensors
        self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long()
        self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods)
        self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev)

    def to(self, device):
        self.ddim_timesteps = self.ddim_timesteps.to(device)
        self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device)
        self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device)
        return self

    def ddim_step(self, pred_x0, pred_noise, timestep_index):
        alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape)
        dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise
        x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt
        return x_prev


@torch.no_grad()
def log_validation(model, step, device):
    if hasattr(model, 'module'):
        model = model.module
    scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon")
    scheduler.set_timesteps(4, 50)
    infer_timesteps = scheduler.timesteps

    dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu')
    caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device)
    hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1)
    ar = torch.tensor([[1.]], device=device).repeat(1, 1)

    # Create sampling noise:
    infer_latents = torch.randn(1, 4, 1024, 1024, device=device)
    model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks)
    logger.info("Running validation... ")

    # 7. LCM MultiStep Sampling Loop:
    for i, t in tqdm(list(enumerate(infer_timesteps))):
        ts = torch.full((1,), t, device=device, dtype=torch.long)

        # model prediction (v-prediction, eps, x)
        model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4]

        # compute the previous noisy sample x_t -> x_t-1
        infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False)

    samples = vae.decode(denoised / 0.18215).sample
    torch.cuda.empty_cache()
    save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1))


def train():
    if config.get('debug_nan', False):
        DebugUnderflowOverflow(model)
        logger.info('NaN debugger registered. Start to detect overflow during training.')
    time_start, last_tic = time.time(), time.time()
    log_buffer = LogBuffer()

    start_step = start_epoch * len(train_dataloader)
    global_step = 0
    total_steps = len(train_dataloader) * config.num_epochs

    load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False)

    # Create uncond embeds for classifier free guidance
    uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1)

    # Now you train the model
    for epoch in range(start_epoch + 1, config.num_epochs + 1):
        data_time_start = time.time()
        data_time_all = 0
        for step, batch in enumerate(train_dataloader):
            data_time_all += time.time() - data_time_start
            if load_vae_feat:
                z = batch[0]
            else:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'):
                        posterior = vae.encode(batch[0]).latent_dist
                        if config.sample_posterior:
                            z = posterior.sample()
                        else:
                            z = posterior.mode()
            latents = z * config.scale_factor
            y = batch[1]
            y_mask = batch[2]
            data_info = batch[3]

            # Sample a random timestep for each image
            grad_norm = None
            with accelerator.accumulate(model):
                # Predict the noise residual
                optimizer.zero_grad()

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]

                # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias.
                topk = config.train_sampling_steps // config.num_ddim_timesteps
                index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long()
                start_timesteps = solver.ddim_timesteps[index]
                timesteps = start_timesteps - topk
                timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps)

                # Get boundary scalings for start_timesteps and (end) timesteps.
                c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps)
                c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]]
                c_skip, c_out = scalings_for_boundary_conditions(timesteps)
                c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]]

                # Sample a random guidance scale w from U[w_min, w_max] and embed it
                # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min
                w = config.cfg_scale * torch.ones((bsz,))
                w = w.reshape(bsz, 1, 1, 1)
                w = w.to(device=latents.device, dtype=latents.dtype)

                # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k}
                _, pred_x_0, noisy_model_input = train_diffusion.training_losses(
                    model, latents, start_timesteps,
                    model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                    noise=noise
                )
                model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0

                # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after
                # noisy_latents with both the conditioning embedding c and unconditional embedding 0
                # Get teacher model prediction on noisy_latents and conditional embedding
                with torch.no_grad():
                    with torch.autocast("cuda"):
                        cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(
                            model_teacher, latents, start_timesteps,
                            model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                            noise=noise
                        )

                        # Get teacher model prediction on noisy_latents and unconditional embedding
                        uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(
                            model_teacher, latents, start_timesteps,
                            model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info),
                            noise=noise
                        )

                        # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation)
                        pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0)
                        pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output)
                        x_prev = solver.ddim_step(pred_x0, pred_noise, index)

                # Get target LCM prediction on x_prev, w, c, t_n
                with torch.no_grad():
                    with torch.autocast("cuda", enabled=True):
                        _, pred_x_0, _ = train_diffusion.training_losses(
                            model_ema, x_prev.float(), timesteps,
                            model_kwargs=dict(y=y, mask=y_mask, data_info=data_info),
                            skip_noise=True
                        )
                        target = c_skip * x_prev + c_out * pred_x_0

                # Calculate loss
                if config.loss_type == "l2":
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
                elif config.loss_type == "huber":
                    loss = torch.mean(
                        torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c
                    )

                # Backpropagation on the online student model (`model`)
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

            if accelerator.sync_gradients:
                ema_update(model_ema, model, config.ema_decay)

            lr = lr_scheduler.get_last_lr()[0]
            logs = {"loss": accelerator.gather(loss).mean().item()}
            if grad_norm is not None:
                logs.update(grad_norm=accelerator.gather(grad_norm).mean().item())
            log_buffer.update(logs)
            if (step + 1) % config.log_interval == 0 or (step + 1) == 1:
                t = (time.time() - last_tic) / config.log_interval
                t_d = data_time_all / config.log_interval
                avg_time = (time.time() - time_start) / (global_step + 1)
                eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1))))
                eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1))))
                # avg_loss = sum(loss_buffer) / len(loss_buffer)
                log_buffer.average()
                info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \
                       f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), "
                info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()])
                logger.info(info)
                last_tic = time.time()
                log_buffer.clear()
                data_time_all = 0
            logs.update(lr=lr)
            accelerator.log(logs, step=global_step + start_step)
            global_step += 1
            data_time_start = time.time()

            synchronize()
            torch.cuda.empty_cache()
            if accelerator.is_main_process:
                # log_validation(model_ema, step, model.device)
                if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0:
                    os.umask(0o000)
                    save_checkpoint(os.path.join(config.work_dir, 'checkpoints'),
                                    epoch=epoch,
                                    step=(epoch - 1) * len(train_dataloader) + step + 1,
                                    model=accelerator.unwrap_model(model),
                                    model_ema=accelerator.unwrap_model(model_ema),
                                    optimizer=optimizer,
                                    lr_scheduler=lr_scheduler
                                    )
            synchronize()

        synchronize()
        if accelerator.is_main_process:
            if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs:
                os.umask(0o000)
                save_checkpoint(os.path.join(config.work_dir, 'checkpoints'),
                                epoch=epoch,
                                step=(epoch - 1) * len(train_dataloader) + step + 1,
                                model=accelerator.unwrap_model(model),
                                model_ema=accelerator.unwrap_model(model_ema),
                                optimizer=optimizer,
                                lr_scheduler=lr_scheduler
                                )
        synchronize()


def parse_args():
    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument("config", type=str, help="config")
    parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine")
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the dir to resume the training')
    parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training')
    parser.add_argument('--local-rank', type=int, default=-1)
    parser.add_argument('--local_rank', type=int, default=-1)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
config = read_config(args.config)
12
2023-10-12 14:16:33+00:00
24k
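The LCM distillation loop in the row above builds its training target from scalings_for_boundary_conditions and DDIMSolver.ddim_step. The sketch below exercises those two pieces in isolation; the 1000-step alpha-cumprod curve, tensor shapes, and index values are illustrative assumptions, not values taken from the dataset.

import numpy as np
import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5):
    # Same form as in the row above: c_skip -> 1 and c_out -> 0 as timestep -> 0.
    c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2)
    c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

# Boundary behaviour: at t = 0 the student's output is just its (noisy) input.
c_skip, c_out = scalings_for_boundary_conditions(torch.tensor([0.0, 999.0]))
print(c_skip)  # approximately [1.0, 2.5e-09]
print(c_out)   # approximately [0.0, 1.0]

# One DDIM step on a 50-step sub-schedule (illustrative alpha_cumprod curve).
alphas_cumprod = np.linspace(0.999, 0.01, 1000)
step_ratio = 1000 // 50
ddim_timesteps = (np.arange(1, 51) * step_ratio).round().astype(np.int64) - 1
ddim_alpha_cumprods_prev = torch.from_numpy(
    np.asarray([alphas_cumprod[0]] + alphas_cumprod[ddim_timesteps[:-1]].tolist())
)

pred_x0 = torch.zeros(2, 4, 8, 8)      # hypothetical predicted clean latents
pred_noise = torch.randn(2, 4, 8, 8)   # hypothetical predicted noise
index = torch.tensor([10, 40])         # positions on the 50-step schedule
a_prev = ddim_alpha_cumprods_prev[index].reshape(2, 1, 1, 1)
x_prev = a_prev.sqrt() * pred_x0 + (1.0 - a_prev).sqrt() * pred_noise
print(x_prev.shape)  # torch.Size([2, 4, 8, 8])

At timestep 0 the skip coefficient is 1 and the output coefficient is 0, which is the self-consistency boundary condition the distilled student has to satisfy.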
NVlabs/EmerNeRF
datasets/nuscenes.py
[ { "identifier": "SceneLidarSource", "path": "datasets/base/lidar_source.py", "snippet": "class SceneLidarSource(abc.ABC):\n \"\"\"\n The base class for the lidar source of a scene.\n \"\"\"\n\n data_cfg: OmegaConf = None\n # the normalized timestamps of all points (normalized to [0, 1]), shape: (num_points,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all points, shape: (num_points,)\n _timestamps: Tensor = None\n # the timesteps of all points, shape: (num_points,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of lidar scans,\n # while timesteps are the integer timestep indices of lidar scans.\n _timesteps: Tensor = None\n # origin of each lidar point, shape: (num_points, 3)\n origins: Tensor = None\n # unit direction of each lidar point, shape: (num_points, 3)\n directions: Tensor = None\n # range of each lidar point, shape: (num_points,)\n ranges: Tensor = None\n # the transformation matrices from lidar to world coordinate system,\n lidar_to_worlds: Tensor = None\n # the indices of the lidar scans that are cached\n cached_indices: Tensor = None\n cached_origins: Tensor = None\n cached_directions: Tensor = None\n cached_ranges: Tensor = None\n cached_normalized_timestamps: Tensor = None\n\n def __init__(\n self,\n lidar_data_config: OmegaConf,\n device: torch.device = torch.device(\"cpu\"),\n ) -> None:\n # hold the config of the lidar data\n self.data_cfg = lidar_data_config\n self.device = device\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create a list of all the files in the dataset.\n e.g., a list of all the lidar scans in the dataset.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self):\n self.load_calibrations()\n self.load_lidar()\n logger.info(\"[Lidar] All Lidar Data loaded.\")\n\n def to(self, device: torch.device) -> \"SceneLidarSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.origins is not None:\n self.origins = self.origins.to(device)\n if self.directions is not None:\n self.directions = self.directions.to(device)\n if self.ranges is not None:\n self.ranges = self.ranges.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.lidar_to_worlds is not None:\n self.lidar_to_worlds = self.lidar_to_worlds.to(device)\n return self\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the calibration files of the dataset.\n e.g., lidar to world transformation matrices.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_lidar(self) -> None:\n \"\"\"\n Load the lidar data of the dataset from the filelist.\n \"\"\"\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n we assume the lidar points are already in the world coordinate system\n we first downsample the lidar points, then compute the aabb by taking the\n given percentiles of the lidar coordinates in each dimension.\n \"\"\"\n assert (\n self.origins is not None\n and self.directions is not None\n and self.ranges is not None\n ), \"Lidar points not loaded, cannot compute aabb.\"\n 
logger.info(\"[Lidar] Computing auto AABB based on downsampled lidar points....\")\n\n lidar_pts = self.origins + self.directions * self.ranges\n\n # downsample the lidar points by uniformly sampling a subset of them\n lidar_pts = lidar_pts[\n torch.randperm(len(lidar_pts))[\n : int(len(lidar_pts) / self.data_cfg.lidar_downsample_factor)\n ]\n ]\n # compute the aabb by taking the given percentiles of the lidar coordinates in each dimension\n aabb_min = torch.quantile(lidar_pts, self.data_cfg.lidar_percentile, dim=0)\n aabb_max = torch.quantile(lidar_pts, 1 - self.data_cfg.lidar_percentile, dim=0)\n del lidar_pts\n torch.cuda.empty_cache()\n\n # usually the lidar's height is very small, so we slightly increase the height of the aabb\n if aabb_max[-1] < 20:\n aabb_max[-1] = 20.0\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Lidar] Auto AABB from LiDAR: {aabb}\")\n return aabb\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of lidar timestamps in the dataset,\n usually the number of captured lidar scans.\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of each lidar timestamp,\n shape: (num_lidar_points,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of the lidar scans,\n while timesteps are the integer timestep indices of the lidar scans.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of the lidar scans.\n shape: (num_lidar_points,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of the lidar scans.\n Args:\n normalized_timestamps: the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n Note:\n we normalize the lidar timestamps together with the image timestamps,\n so that the both the lidar and image timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.size(0) == self.origins.size(\n 0\n ), \"The number of lidar points and the number of normalized timestamps must match.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - normed_timestamp)\n )\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = 
None,\n ) -> Tensor:\n \"\"\"\n Sample a batch of rays uniformly from the dataset.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n lidar_idx: the indices of the sampled lidar points.\n shape: (num_rays,)\n \"\"\"\n if candidate_indices is None:\n return torch.randint(\n 0, len(self.origins), size=(num_rays,), device=self.device\n )\n else:\n if not isinstance(candidate_indices, Tensor):\n candidate_indices = torch.tensor(candidate_indices, device=self.device)\n if self.cached_indices is None:\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n if not torch.equal(candidate_indices, self.cached_indices):\n print(\"Recomputing cached indices\")\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n random_idx = torch.randint(\n 0,\n len(self.cached_origins),\n size=(num_rays,),\n device=self.device,\n )\n return random_idx\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n lidar_idx = self.sample_uniform_rays(\n num_rays=num_rays, candidate_indices=candidate_indices\n )\n origins = self.cached_origins[lidar_idx]\n directions = self.cached_directions[lidar_idx]\n ranges = self.cached_ranges[lidar_idx]\n normalized_timestamps = self.cached_normalized_timestamps[lidar_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }\n\n def get_render_rays(self, time_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the of rays for rendering at the given timestep.\n Args:\n time_idx: the index of the lidar scan to render.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n origins = self.origins[self.timesteps == time_idx]\n directions = self.directions[self.timesteps == time_idx]\n ranges = self.ranges[self.timesteps == time_idx]\n normalized_timestamps = self.normalized_timestamps[self.timesteps == time_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }" }, { "identifier": "ScenePixelSource", "path": "datasets/base/pixel_source.py", "snippet": "class ScenePixelSource(abc.ABC):\n \"\"\"\n The 
base class for all pixel sources of a scene.\n \"\"\"\n\n # the original size of the images in the dataset\n # these values are from the waymo dataset as a placeholder\n ORIGINAL_SIZE = [[1280, 1920], [1280, 1920], [1280, 1920], [884, 1920], [884, 1920]]\n\n # define a transformation matrix to convert the opencv camera coordinate system to the dataset camera coordinate system\n OPENCV2DATASET = np.array(\n [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]\n )\n data_cfg: OmegaConf = None\n # the normalized timestamps of all images (normalized to [0, 1]), shape: (num_imgs,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all images, shape: (num_imgs,)\n _timestamps: Tensor = None\n # the timesteps of all images, shape: (num_imgs,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of images\n # while timesteps are the integer timestep indices of images.\n _timesteps: Tensor = None\n # camera ids of all images, shape: (num_imgs,)\n cam_ids: Tensor = None\n # camera-to-world matrices of all images, shape: (num_imgs, 4, 4)\n cam_to_worlds: Tensor = None\n # camera intrinsic matrices of all images, shape: (num_imgs, 3, 3)\n intrinsics: Tensor = None\n # all image tensors, shape: (num_imgs, load_size[0], load_size[1], 3)\n images: Tensor = None\n # the image ids of all images, shape: (num_imgs,)\n img_ids: Tensor = None\n # the binary masks of dynamic objects, shape: (num_imgs, load_size[0], load_size[1])\n dynamic_masks: Tensor = None\n # the binary masks of sky regions, shape: (num_imgs, load_size[0], load_size[1])\n sky_masks: Tensor = None\n # the feature tensors, shape: (num_imgs, num_patches_h, num_patches_w, C)\n features: Tensor = None\n # the pca matrix used to reduce the feature dimension to target_feature_dim,\n # shape: (original_feature_dim, target_feature_dim)\n reduce_to_target_dim_mat: Tensor = None\n # the min and max values of the reduced features used for normalization,\n # shape: (target_feature_dim,)\n feat_min: Tensor = None\n feat_max: Tensor = None\n\n # the pca matrix used to reduce the feature dimension for visualization,\n # shape: (target_feature_dim, 3)\n feat_dimension_reduction_mat: Tensor = None\n # the min and max values of the original features used for visualization,\n # shape: (3,)\n feat_color_min: Tensor = None\n feat_color_max: Tensor = None\n # the downscale factor of the features, shape: (2,)\n featmap_downscale_factor: Tuple[float, float] = None\n\n # importance sampling weights of all images,\n # shape: (num_imgs, load_size[0] // buffer_scale, load_size[1] // buffer_scale)\n pixel_error_maps: Tensor = None\n pixel_error_buffered: bool = False\n\n def __init__(\n self, pixel_data_config: OmegaConf, device: torch.device = torch.device(\"cpu\")\n ) -> None:\n # hold the config of the pixel data\n self.data_cfg = pixel_data_config\n self.device = device\n self._downscale_factor = 1 / pixel_data_config.downscale\n self._old_downscale_factor = 1 / pixel_data_config.downscale\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create file lists for all data files.\n e.g., img files, feature files, etc.\n \"\"\"\n self.img_filepaths = []\n self.feat_filepaths = []\n self.sky_mask_filepaths = []\n self.dynamic_mask_filepaths = []\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the camera intrinsics, extrinsics, timestamps, etc.\n Compute the camera-to-world matrices, ego-to-world matrices, 
etc.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self) -> None:\n \"\"\"\n A general function to load all data.\n \"\"\"\n self.load_calibrations()\n self.load_rgb()\n self.load_dynamic_mask()\n self.load_sky_mask()\n self.load_features()\n # build the pixel error buffer\n self.build_pixel_error_buffer()\n logger.info(\"[Pixel] All Pixel Data loaded.\")\n\n def to(self, device: torch.device) -> \"ScenePixelSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.images is not None:\n self.images = self.images.to(device)\n if self.dynamic_masks is not None:\n self.dynamic_masks = self.dynamic_masks.to(device)\n if self.sky_masks is not None:\n self.sky_masks = self.sky_masks.to(device)\n if self.features is not None:\n # this step can be dangerous because the features are huge\n # TODO: add a flag to control whether to move the features to GPU\n self.features = self.features.to(device)\n if self.reduce_to_target_dim_mat is not None:\n self.reduce_to_target_dim_mat = self.reduce_to_target_dim_mat.to(\n self.device\n )\n if self.feat_min is not None:\n self.feat_min = self.feat_min.to(self.device)\n self.feat_max = self.feat_max.to(self.device)\n if self.feat_dimension_reduction_mat is not None:\n self.feat_dimension_reduction_mat = (\n self.feat_dimension_reduction_mat.to(self.device)\n )\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n if self.cam_to_worlds is not None:\n self.cam_to_worlds = self.cam_to_worlds.to(device)\n if self.intrinsics is not None:\n self.intrinsics = self.intrinsics.to(device)\n if self.cam_ids is not None:\n self.cam_ids = self.cam_ids.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.pixel_error_maps is not None:\n self.pixel_error_maps = self.pixel_error_maps.to(device)\n return self\n\n def load_rgb(self) -> None:\n \"\"\"\n Load the RGB images if they are available. 
We cache the images in memory for faster loading.\n Note this can be memory consuming.\n \"\"\"\n if not self.data_cfg.load_rgb:\n return\n images = []\n for fname in tqdm(\n self.img_filepaths, desc=\"Loading images\", dynamic_ncols=True\n ):\n rgb = Image.open(fname).convert(\"RGB\")\n # resize them to the load_size\n rgb = rgb.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n images.append(rgb)\n # normalize the images to [0, 1]\n self.images = torch.from_numpy(np.stack(images, axis=0)) / 255\n self.img_ids = torch.arange(len(self.images)).long()\n\n def load_dynamic_mask(self) -> None:\n \"\"\"\n Load the dynamic masks if they are available.\n \"\"\"\n if not self.data_cfg.load_dynamic_mask:\n return\n dynamic_masks = []\n for fname in tqdm(\n self.dynamic_mask_filepaths,\n desc=\"Loading dynamic masks\",\n dynamic_ncols=True,\n ):\n dyn_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n dyn_mask = dyn_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n dynamic_masks.append(np.array(dyn_mask) > 0)\n self.dynamic_masks = torch.from_numpy(np.stack(dynamic_masks, axis=0)).float()\n\n def load_sky_mask(self) -> None:\n \"\"\"\n Load the sky masks if they are available.\n \"\"\"\n if not self.data_cfg.load_sky_mask:\n return\n sky_masks = []\n for fname in tqdm(\n self.sky_mask_filepaths, desc=\"Loading sky masks\", dynamic_ncols=True\n ):\n sky_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n sky_mask = sky_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.NEAREST\n )\n sky_masks.append(np.array(sky_mask) > 0)\n self.sky_masks = torch.from_numpy(np.stack(sky_masks, axis=0)).float()\n\n def load_features(self) -> None:\n \"\"\"\n Load the features if they are available.\n \"\"\"\n if not self.data_cfg.load_features:\n return\n\n if not self.data_cfg.skip_feature_extraction:\n logger.info(f\"Extracting {self.data_cfg.feature_model_type}...\")\n return_dict = extract_and_save_features(\n input_img_path_list=self.img_filepaths,\n saved_feat_path_list=self.feat_filepaths,\n img_shape=self.data_cfg.feature_extraction_size,\n stride=self.data_cfg.feature_extraction_stride,\n model_type=self.data_cfg.feature_model_type,\n )\n\n features = []\n for fname in tqdm(\n self.feat_filepaths, desc=\"Loading features\", dynamic_ncols=True\n ):\n # mmap_mode=\"r\" is to avoid memory overflow when loading features\n # but it only slightly helps... do we have a better way to load features?\n feature = np.load(fname, mmap_mode=\"r\").squeeze()\n features.append(feature)\n # shape: (num_imgs, num_patches_h, num_patches_w, C)\n self.features = torch.from_numpy(np.stack(features, axis=0)).float()\n # featmap_downscale_factor is used to convert the image coordinates to ViT feature coordinates.\n # resizing ViT features to (H, W) using bilinear interpolation is infeasible.\n # imagine a feature array of shape (num_timesteps x num_cams, 640, 960, 768). 
it's too large to fit in GPU memory.\n self.featmap_downscale_factor = (\n self.features.shape[1] / self.data_cfg.load_size[0],\n self.features.shape[2] / self.data_cfg.load_size[1],\n )\n logger.info(\n f\"Loaded {self.features.shape} {self.data_cfg.feature_model_type} features.\"\n )\n logger.info(f\"Feature scale: {self.featmap_downscale_factor}\")\n logger.info(f\"Computing features PCA...\")\n # compute feature visualization matrix\n C = self.features.shape[-1]\n # no need to compute PCA on the entire set of features, we randomly sample 100k features\n temp_feats = self.features.reshape(-1, C)\n max_elements_to_compute_pca = min(100000, temp_feats.shape[0])\n selected_features = temp_feats[\n np.random.choice(\n temp_feats.shape[0], max_elements_to_compute_pca, replace=False\n )\n ]\n if self.data_cfg.target_feature_dim is not None:\n logger.info(\n f\"Reducing features to {self.data_cfg.target_feature_dim} dimensions.\"\n )\n # compute PCA to reduce the feature dimension to target_feature_dim\n U, S, reduce_to_target_dim_mat = torch.pca_lowrank(\n selected_features, q=self.data_cfg.target_feature_dim, niter=20\n )\n # compute the fraction of variance explained by target_feature_dim\n variances = S**2\n fraction_var_explained = variances / variances.sum()\n logger.info(f\"[PCA] fraction_var_explained: \\n{fraction_var_explained}\")\n logger.info(\n f\"[PCA] fraction_var_explained sum: {fraction_var_explained.sum()}\",\n )\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat\n\n # reduce the features to target_feature_dim\n selected_features = selected_features @ reduce_to_target_dim_mat\n self.features = self.features @ reduce_to_target_dim_mat\n C = self.features.shape[-1]\n\n # normalize the reduced features to [0, 1] along each dimension\n feat_min = self.features.reshape(-1, C).min(dim=0)[0]\n feat_max = self.features.reshape(-1, C).max(dim=0)[0]\n self.features = (self.features - feat_min) / (feat_max - feat_min)\n selected_features = (selected_features - feat_min) / (feat_max - feat_min)\n self.feat_min = feat_min.to(self.device)\n self.feat_max = feat_max.to(self.device)\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat.to(self.device)\n # we compute the first 3 principal components of the ViT features as the color\n reduction_mat, feat_color_min, feat_color_max = get_robust_pca(\n selected_features\n )\n # final features are of shape (num_imgs, num_patches_h, num_patches_w, target_feature_dim)\n self.features = self.features\n\n # save visualization parameters\n self.feat_dimension_reduction_mat = reduction_mat\n self.feat_color_min = feat_color_min\n self.feat_color_max = feat_color_max\n del temp_feats, selected_features\n\n logger.info(\n f\"Feature PCA computed, shape: {self.feat_dimension_reduction_mat.shape}\"\n )\n\n def delete_features(self) -> None:\n \"\"\"\n Delete the features if they exist.\n This is to save disk space. 2D features of a single sequence can be 30GB+.\n \"\"\"\n delete_features(self.feat_filepaths)\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n We compute the coarse aabb by using the front camera positions / trajectories. 
We then\n extend this aabb by 40 meters along horizontal directions and 20 meters up and 5 meters\n down along vertical directions.\n \"\"\"\n assert (\n self.cam_to_worlds is not None\n ), \"Camera poses not loaded, cannot compute aabb.\"\n logger.info(\"[Pixel] Computing auto AABB based on front camera trajectory....\")\n if self.num_cams == 1:\n # if there is only one camera, it's front camera\n front_cameras_positions = self.cam_to_worlds[:, :3, 3]\n elif self.num_cams == 3:\n # if there are three cameras, they are ordered as front_left, front, front_right\n front_cameras_positions = self.cam_to_worlds[1::3, :3, 3]\n elif self.num_cams == 5:\n # if there are five cameras, they are ordered as side_left, front_left, front, front_right, side_right\n front_cameras_positions = self.cam_to_worlds[2::5, :3, 3]\n elif self.num_cams == 6:\n # if there are six cameras, they are ordered as front_left, front, front_right, back_left, back, back_right\n front_cameras_positions = self.cam_to_worlds[2::6, :3, 3]\n\n # compute the aabb\n aabb_min = front_cameras_positions.min(dim=0)[0]\n aabb_max = front_cameras_positions.max(dim=0)[0]\n\n # extend aabb by 40 meters along forward direction and 40 meters along the left/right direction\n # aabb direction: x, y, z: front, left, up\n aabb_max[0] += 40\n aabb_max[1] += 40\n # when the car is driving uphills\n aabb_max[2] = min(aabb_max[2] + 20, 20)\n\n # for waymo, there will be a lot of waste of space because we don't have images in the back,\n # it's more reasonable to extend the aabb only by a small amount, e.g., 5 meters\n # we use 40 meters here for a more general case\n aabb_min[0] -= 40\n aabb_min[1] -= 40\n # when a car is driving downhills\n aabb_min[2] = max(aabb_min[2] - 5, -5)\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Pixel] Auto AABB from camera: {aabb}\")\n return aabb\n\n def get_features(\n self,\n img_id,\n y: Tensor,\n x: Tensor,\n downscale: Union[float, Tuple[float, float]] = 1.0,\n ) -> Tensor:\n \"\"\"\n Get the features at the given pixel coordinates.\n Args:\n img_id: the image index.\n y: the vertical coordinates of the pixels, shape: (num_rays,)\n x: the horizontal coordinates of the pixels, shape: (num_rays,)\n downscale: the downscale factor of the features.\n If it's a float, we use the same downscale factor for both height and width.\n If it's a tuple, we use the first value as the downscale factor for height\n and the second value as the downscale factor for width.\n Returns:\n features: the features at the given pixel coordinates.\n shape: (num_rays, feat_dim)\n \"\"\"\n if isinstance(downscale, float):\n downscale = (downscale, downscale)\n # we compute the nearest DINO feature for each pixel\n # map (x, y) in the (W, H) space to (x * dino_scale[0], y * dino_scale[1]) in the (W//patch_size, H//patch_size) space\n dino_y = (y * downscale[0]).long()\n dino_x = (x * downscale[1]).long()\n # dino_feats are in CPU memory (because they are huge), so we need to move them to GPU\n dino_feat = self.features[img_id, dino_y.cpu(), dino_x.cpu()]\n return dino_feat\n\n def build_pixel_error_buffer(self) -> None:\n \"\"\"\n Build the pixel error buffer.\n \"\"\"\n if self.buffer_ratio > 0:\n # shape: (num_imgs, H // buffer_downscale, W // buffer_downscale)\n self.pixel_error_maps = torch.ones(\n (\n len(self.cam_to_worlds),\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n ),\n dtype=torch.float32,\n device=self.device,\n )\n logger.info(\n f\"Successfully built pixel error buffer 
(log2(num_pixels) = {np.log2(len(self.pixel_error_maps.reshape(-1))):.2f}).\"\n )\n else:\n logger.info(\"Not building pixel error buffer because buffer_ratio <= 0.\")\n\n def update_pixel_error_maps(self, render_results: Dict[str, Tensor]) -> None:\n \"\"\"\n Update the pixel error buffer with the given render results.\n \"\"\"\n if self.pixel_error_maps is None:\n logger.info(\"Skipping pixel error buffer update because it's not built.\")\n return\n gt_rgbs = render_results[\"gt_rgbs\"]\n pred_rgbs = render_results[\"rgbs\"]\n gt_rgbs = torch.from_numpy(np.stack(gt_rgbs, axis=0))\n pred_rgbs = torch.from_numpy(np.stack(pred_rgbs, axis=0))\n pixel_error_maps = torch.abs(gt_rgbs - pred_rgbs).mean(dim=-1)\n assert pixel_error_maps.shape == self.pixel_error_maps.shape\n if \"dynamic_opacities\" in render_results:\n if len(render_results[\"dynamic_opacities\"]) > 0:\n dynamic_opacity = render_results[\"dynamic_opacities\"]\n dynamic_opacity = torch.from_numpy(np.stack(dynamic_opacity, axis=0))\n # we prioritize the dynamic objects by multiplying the error by 5\n pixel_error_maps[dynamic_opacity > 0.1] *= 5\n # update the pixel error buffer\n self.pixel_error_maps: Tensor = pixel_error_maps.to(self.device)\n # normalize the pixel error buffer to [0, 1]\n self.pixel_error_maps = (\n self.pixel_error_maps - self.pixel_error_maps.min()\n ) / (self.pixel_error_maps.max() - self.pixel_error_maps.min())\n self.pixel_error_buffered = True\n logger.info(\"Successfully updated pixel error buffer\")\n\n def visualize_pixel_sample_weights(self, indices: List[int]) -> np.ndarray:\n \"\"\"\n Visualize the pixel sample weights.\n Args:\n indices: the image indices to visualize.\n Returns:\n frames: the pixel sample weights of the given image.\n shape: (len(indices) // cams, H, num_cams * W, 3)\n \"\"\"\n frames = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )[indices]\n )\n frames = [np.stack([frame, frame, frame], axis=-1) for frame in frames]\n return np.uint8(np.concatenate(frames, axis=1) * 255)\n\n def get_pixel_sample_weights_video(self) -> List[np.ndarray]:\n \"\"\"\n Get the pixel sample weights video.\n Returns:\n frames: the pixel sample weights video.\n shape: (num_imgs // cams, H, num_cams * W, 3)\n \"\"\"\n assert self.buffer_ratio > 0, \"buffer_ratio must be > 0\"\n maps = []\n loss_maps = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n )\n for i in range(self.num_imgs):\n maps.append(loss_maps[i])\n return maps\n\n def sample_important_rays(\n self, num_rays, img_candidate_indices: Tensor = None\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates from the given images based on the pixel error buffer.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n assert self.pixel_error_buffered, \"Pixel error buffer not built.\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = 
torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n sampled_indices = torch.multinomial(\n self.pixel_error_maps[img_candidate_indices].reshape(-1),\n num_rays,\n replacement=False,\n )\n # convert the sampled 1d indices to (img_idx, y, x)\n img_idx, y, x = idx_to_3d(\n sampled_indices,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n img_idx = img_candidate_indices[img_idx]\n\n # Upscale to the original resolution\n y, x = (y * self.buffer_downscale).long(), (x * self.buffer_downscale).long()\n\n # Add a random offset to avoid sampling the same pixel\n y += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n x += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n # Clamp to ensure coordinates don't exceed the image bounds\n y = torch.clamp(y, 0, self.HEIGHT - 1)\n x = torch.clamp(x, 0, self.WIDTH - 1)\n return img_idx, y, x\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n img_candidate_indices: Tensor = None,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates uniformly from the given images.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n # sample random index based on img_candidate_indices\n random_idx = torch.randint(\n 0,\n len(img_candidate_indices),\n size=(num_rays,),\n device=self.device,\n )\n img_id = img_candidate_indices[random_idx]\n\n # sample pixels\n x = torch.randint(\n 0,\n self.WIDTH,\n size=(num_rays,),\n device=self.device,\n )\n y = torch.randint(\n 0,\n self.HEIGHT,\n size=(num_rays,),\n device=self.device,\n )\n x, y = x.long(), y.long()\n return img_id, y, x\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.buffer_ratio > 0 and self.pixel_error_buffered:\n num_roi_rays = int(num_rays * self.buffer_ratio)\n num_random_rays = num_rays - num_roi_rays\n random_img_idx, random_y, random_x = self.sample_uniform_rays(\n num_random_rays, candidate_indices\n )\n roi_img_idx, roi_y, roi_x = self.sample_important_rays(\n num_roi_rays, candidate_indices\n )\n img_idx = torch.cat([random_img_idx, roi_img_idx], dim=0)\n y = torch.cat([random_y, roi_y], dim=0)\n x = torch.cat([random_x, roi_x], dim=0)\n else:\n img_idx, y, x = self.sample_uniform_rays(\n 
num_rays=num_rays, img_candidate_indices=candidate_indices\n )\n pixel_coords = torch.stack([y / self.HEIGHT, x / self.WIDTH], dim=-1)\n if self.images is not None:\n rgb = self.images[img_idx, y, x]\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx, y, x]\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx, y, x].float()\n if self.features is not None:\n features = self.get_features(\n img_idx, y, x, downscale=self.featmap_downscale_factor\n )\n if self.normalized_timestamps is not None:\n normalized_timestamps = self.normalized_timestamps[img_idx]\n if self.cam_ids is not None:\n camera_id = self.cam_ids[img_idx]\n image_id = torch.ones_like(x) * img_idx\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx]\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norms\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n def get_render_rays(self, img_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the rays for rendering the given image index.\n Args:\n img_idx: the image index.\n Returns:\n a dict containing the rays for rendering the given image index.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.images is not None:\n rgb = self.images[img_idx]\n if self.downscale_factor != 1.0:\n rgb = (\n torch.nn.functional.interpolate(\n rgb.unsqueeze(0).permute(0, 3, 1, 2),\n scale_factor=self.downscale_factor,\n mode=\"bicubic\",\n antialias=True,\n )\n .squeeze(0)\n .permute(1, 2, 0)\n )\n img_height, img_width = rgb.shape[:2]\n else:\n img_height, img_width = self.HEIGHT, self.WIDTH\n\n x, y = torch.meshgrid(\n torch.arange(img_width),\n torch.arange(img_height),\n indexing=\"xy\",\n )\n x, y = x.flatten(), y.flatten()\n x, y = x.to(self.device), y.to(self.device)\n # pixel coordinates\n pixel_coords = (\n torch.stack([y / img_height, x / img_width], dim=-1)\n .float()\n .reshape(img_height, img_width, 2)\n )\n\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx]\n if self.downscale_factor != 1.0:\n sky_mask = (\n torch.nn.functional.interpolate(\n sky_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx].float()\n if self.downscale_factor != 1.0:\n dynamic_mask = (\n torch.nn.functional.interpolate(\n dynamic_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.features is not None:\n features = self.get_features(\n img_idx,\n y,\n x,\n downscale=(\n self.featmap_downscale_factor[0] / self.downscale_factor,\n self.featmap_downscale_factor[1] / self.downscale_factor,\n ),\n ).reshape(img_height, img_width, -1)\n\n if self.normalized_timestamps is not None:\n normalized_timestamps = torch.full(\n (img_height, img_width),\n self.normalized_timestamps[img_idx],\n dtype=torch.float32,\n )\n if self.cam_ids is not None:\n camera_id = torch.full(\n (img_height, img_width),\n self.cam_ids[img_idx],\n dtype=torch.long,\n )\n image_id = 
torch.full(\n (img_height, img_width),\n img_idx,\n dtype=torch.long,\n )\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx] * self.downscale_factor\n intrinsics[2, 2] = 1.0\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n origins = origins.reshape(img_height, img_width, 3)\n viewdirs = viewdirs.reshape(img_height, img_width, 3)\n direction_norm = direction_norm.reshape(img_height, img_width, 1)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norm\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n @property\n def num_cams(self) -> int:\n \"\"\"\n Returns:\n the number of cameras in the dataset\n \"\"\"\n return self.data_cfg.num_cams\n\n @property\n def num_imgs(self) -> int:\n \"\"\"\n Returns:\n the number of images in the dataset\n \"\"\"\n return len(self.cam_to_worlds)\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of image timesteps in the dataset\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of all images,\n shape: (num_imgs,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of images\n while timesteps are the integer timestep indices of images.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of all images,\n shape: (num_imgs,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of all images\n (normalized to the range [0, 1]),\n shape: (num_imgs,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of all images.\n Args:\n normalized_timestamps: the normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_imgs,)\n Note:\n we normalize the image timestamps together with the lidar timestamps,\n so that the both the image and lidar timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.shape[0] == len(\n self.img_filepaths\n ), \"The number of normalized timestamps must match the number of images.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - 
normed_timestamp)\n )\n\n @property\n def HEIGHT(self) -> int:\n return self.data_cfg.load_size[0]\n\n @property\n def WIDTH(self) -> int:\n return self.data_cfg.load_size[1]\n\n @property\n def downscale_factor(self) -> float:\n \"\"\"\n Returns:\n downscale_factor: the downscale factor of the images\n \"\"\"\n return self._downscale_factor\n\n def update_downscale_factor(self, downscale: float) -> None:\n \"\"\"\n Args:\n downscale: the new downscale factor\n Updates the downscale factor\n \"\"\"\n self._old_downscale_factor = self._downscale_factor\n self._downscale_factor = downscale\n\n def reset_downscale_factor(self) -> None:\n \"\"\"\n Resets the downscale factor to the original value\n \"\"\"\n self._downscale_factor = self._old_downscale_factor\n\n @property\n def buffer_downscale(self) -> float:\n \"\"\"\n Returns:\n buffer_downscale: the downscale factor of the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_downscale\n\n @property\n def buffer_ratio(self) -> float:\n \"\"\"\n Returns:\n buffer_ratio: the ratio of the rays sampled from the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_ratio" }, { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return 
self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "SplitWrapper", "path": "datasets/base/split_wrapper.py", "snippet": "class SplitWrapper(torch.utils.data.Dataset):\n\n # a sufficiently large number to make sure we don't run out of data\n _num_iters = 1000000\n\n def __init__(\n self,\n datasource: Union[ScenePixelSource, SceneLidarSource],\n split_indices: List[int] = None,\n split: str = \"train\",\n ray_batch_size: int = 4096,\n ):\n super().__init__()\n self.datasource = datasource\n self.split_indices = split_indices\n self.split = split\n self.ray_batch_size = ray_batch_size\n\n def __getitem__(self, idx) -> dict:\n if self.split == \"train\":\n # randomly sample rays from the training set\n return self.datasource.get_train_rays(\n num_rays=self.ray_batch_size,\n candidate_indices=self.split_indices,\n )\n else:\n # return all rays for the given index\n return self.datasource.get_render_rays(self.split_indices[idx])\n\n def __len__(self) -> int:\n if self.split == \"train\":\n return self.num_iters\n else:\n return len(self.split_indices)\n\n @property\n def num_iters(self) -> int:\n return self._num_iters\n\n def set_num_iters(self, num_iters) -> None:\n self._num_iters = num_iters" }, { "identifier": "voxel_coords_to_world_coords", "path": "datasets/utils.py", "snippet": "def voxel_coords_to_world_coords(\n aabb_min: Union[Tensor, List[float]],\n aabb_max: Union[Tensor, List[float]],\n voxel_resolution: Union[Tensor, List[int]],\n points: Union[Tensor, List[float]] = None,\n) -> Tensor:\n \"\"\"\n Converts voxel coordinates to world coordinates.\n\n Args:\n aabb_min (Union[Tensor, List[float]]): Minimum coordinates of the axis-aligned bounding box (AABB) of the voxel grid.\n aabb_max (Union[Tensor, List[float]]): Maximum coordinates of the AABB of the voxel grid.\n voxel_resolution (Union[Tensor, List[int]]): Number of voxels in each dimension of the voxel grid.\n points (Union[Tensor, List[float]], optional):\n Tensor of voxel coordinates to convert to world coordinates.\n If None, returns a grid of world coordinates. 
Defaults to None.\n Returns:\n Tensor: Tensor of world coordinates.\n \"\"\"\n aabb_min = torch.tensor(aabb_min) if isinstance(aabb_min, List) else aabb_min\n aabb_max = torch.tensor(aabb_max) if isinstance(aabb_max, List) else aabb_max\n voxel_resolution = (\n torch.tensor(voxel_resolution)\n if isinstance(voxel_resolution, List)\n else voxel_resolution\n )\n\n if points is None:\n x, y, z = torch.meshgrid(\n torch.linspace(aabb_min[0], aabb_max[0], voxel_resolution[0]),\n torch.linspace(aabb_min[1], aabb_max[1], voxel_resolution[1]),\n torch.linspace(aabb_min[2], aabb_max[2], voxel_resolution[2]),\n )\n return torch.stack([x, y, z], dim=-1)\n else:\n points = torch.tensor(points) if isinstance(points, List) else points\n\n # Compute voxel size\n voxel_size = (aabb_max - aabb_min) / voxel_resolution\n\n # Convert voxel coordinates to world coordinates\n world_coords = aabb_min.to(points.device) + points * voxel_size.to(\n points.device\n )\n return world_coords" }, { "identifier": "save_videos", "path": "radiance_fields/video_utils.py", "snippet": "def render_pixels(\n cfg: OmegaConf,\n model: RadianceField,\n proposal_estimator: PropNetEstimator,\n dataset: SplitWrapper,\n proposal_networks: Optional[List[DensityField]] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n return_decomposition: bool = True,\n):\ndef render(\n dataset: SplitWrapper,\n render_func: Callable,\n model: Optional[RadianceField] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n):\ndef save_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_seperate_video: bool = False,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\ndef save_concatenated_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\ndef save_seperate_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n fps: int = 10,\n verbose: bool = False,\n save_images: bool = False,\n):" }, { "identifier": "NumpyEncoder", "path": "utils/misc.py", "snippet": "class NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist() # convert numpy array to list\n return super(NumpyEncoder, self).default(obj)" } ]
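The context above includes the full body of voxel_coords_to_world_coords from datasets/utils.py. A small usage sketch, assuming the module is importable as in the import statement below; the AABB bounds and voxel resolution are made-up example values.

import torch
from datasets.utils import voxel_coords_to_world_coords

aabb_min = [-40.0, -40.0, -5.0]   # illustrative scene bounds in meters
aabb_max = [40.0, 40.0, 20.0]
resolution = [100, 100, 32]

# With points=None, a full (100, 100, 32, 3) grid of world-space coordinates is returned.
grid = voxel_coords_to_world_coords(aabb_min, aabb_max, resolution)
print(grid.shape)  # torch.Size([100, 100, 32, 3])

# With explicit voxel coordinates, each one is scaled by the voxel size and offset by aabb_min.
voxels = torch.tensor([[0.0, 0.0, 0.0], [50.0, 50.0, 16.0]])
world = voxel_coords_to_world_coords(aabb_min, aabb_max, resolution, points=voxels)
print(world)  # first row equals aabb_min; second row sits at the midpoint of the AABB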
import json
import logging
import os
import numpy as np
import torch
from typing import Dict
from nuscenes.nuscenes import LidarPointCloud, NuScenes
from omegaconf import OmegaConf
from pyquaternion import Quaternion
from torch import Tensor
from tqdm import trange
from datasets.base.lidar_source import SceneLidarSource
from datasets.base.pixel_source import ScenePixelSource
from datasets.base.scene_dataset import SceneDataset
from datasets.base.split_wrapper import SplitWrapper
from datasets.utils import voxel_coords_to_world_coords
from radiance_fields.video_utils import save_videos, depth_visualizer
from utils.misc import NumpyEncoder
17396
split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # the number of image timesteps is different from the number of lidar timesteps # TODO: find a better way to handle this # currently use all the lidar timesteps for training split_indices=np.arange(self.lidar_source.num_timesteps), split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): pixel_source, lidar_source = None, None all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = NuScenesPixelSource( pixel_data_config=self.data_cfg.pixel_source, data_path=self.data_path, scene_idx=self.scene_idx, meta_file_path=self.img_meta_file_path, start_timestep=self.data_cfg.start_timestep, end_timestep=self.data_cfg.end_timestep, ) pixel_source.to(self.device) all_timestamps.append(pixel_source.timestamps) self.start_timestep = pixel_source.start_timestep self.end_timestep = pixel_source.end_timestep self.scene_fraction = pixel_source.scene_fraction # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = NuScenesLiDARSource( lidar_data_config=self.data_cfg.lidar_source, data_path=self.data_path, meta_file_path=self.lidar_meta_file_path, nusc=pixel_source.nusc if pixel_source is not None else None, scene_idx=self.scene_idx, start_timestep=self.start_timestep, fraction=self.scene_fraction, global_to_initial_ego=pixel_source.global_to_initial_ego, ) lidar_source.to(self.device) all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) all_timestamps = all_timestamps.float() if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): assert ( self.data_cfg.pixel_source.test_image_stride == 0 ), "test_image_stride > 0 is not supported for nuscenes dataset. 
" if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], [] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") return train_timesteps, test_timesteps, train_indices, test_indices
logger = logging.getLogger() class NuScenesPixelSource(ScenePixelSource): ORIGINAL_SIZE = [[900, 1600] for _ in range(6)] OPENCV2DATASET = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) def __init__( self, pixel_data_config: OmegaConf, data_path: str, meta_file_path: str, nusc: NuScenes = None, scene_idx: int = 0, start_timestep: int = 0, end_timestep: int = -1, device: torch.device = torch.device("cpu"), ): pixel_data_config.load_dynamic_mask = False logger.info("[Pixel] Overriding load_dynamic_mask to False") super().__init__(pixel_data_config, device=device) self.data_path = data_path self.meta_file_path = meta_file_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.nusc = nusc self.scene_idx = scene_idx self.meta_dict = self.create_or_load_metas() self.create_all_filelist() self.load_data() def create_or_load_metas(self): # ---- define camera list ---- # if self.num_cams == 1: self.camera_list = ["CAM_FRONT"] elif self.num_cams == 3: self.camera_list = ["CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT"] elif self.num_cams == 6: self.camera_list = [ "CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_BACK_LEFT", "CAM_BACK", "CAM_BACK_RIGHT", ] else: raise NotImplementedError( f"num_cams: {self.num_cams} not supported for nuscenes dataset" ) if os.path.exists(self.meta_file_path): with open(self.meta_file_path, "r") as f: meta_dict = json.load(f) logger.info(f"[Pixel] Loaded camera meta from {self.meta_file_path}") return meta_dict else: logger.info(f"[Pixel] Creating camera meta at {self.meta_file_path}") if self.nusc is None: self.nusc = NuScenes( version="v1.0-trainval", dataroot=self.data_path, verbose=True ) self.scene = self.nusc.scene[self.scene_idx] total_camera_list = [ "CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_BACK_LEFT", "CAM_BACK", "CAM_BACK_RIGHT", ] meta_dict = { camera: { "timestamp": [], "filepath": [], "ego_pose": [], "cam_id": [], "extrinsics": [], "intrinsics": [], } for i, camera in enumerate(total_camera_list) } # ---- get the first sample of each camera ---- # current_camera_data_tokens = {camera: None for camera in total_camera_list} first_sample = self.nusc.get("sample", self.scene["first_sample_token"]) for camera in total_camera_list: current_camera_data_tokens[camera] = first_sample["data"][camera] while not all(token == "" for token in current_camera_data_tokens.values()): for i, camera in enumerate(total_camera_list): # skip if the current camera data token is empty if current_camera_data_tokens[camera] == "": continue current_camera_data = self.nusc.get( "sample_data", current_camera_data_tokens[camera] ) # ---- timestamp and cam_id ---- # meta_dict[camera]["cam_id"].append(i) meta_dict[camera]["timestamp"].append(current_camera_data["timestamp"]) meta_dict[camera]["filepath"].append(current_camera_data["filename"]) # ---- intrinsics and extrinsics ---- # calibrated_sensor_record = self.nusc.get( "calibrated_sensor", current_camera_data["calibrated_sensor_token"] ) # intrinsics intrinsic = calibrated_sensor_record["camera_intrinsic"] meta_dict[camera]["intrinsics"].append(np.array(intrinsic)) # extrinsics extrinsic = np.eye(4) extrinsic[:3, :3] = Quaternion( calibrated_sensor_record["rotation"] ).rotation_matrix extrinsic[:3, 3] = np.array(calibrated_sensor_record["translation"]) meta_dict[camera]["extrinsics"].append(extrinsic) # ---- ego pose ---- # ego_pose_record = self.nusc.get( "ego_pose", current_camera_data["ego_pose_token"] ) ego_pose = np.eye(4) ego_pose[:3, :3] = Quaternion( 
ego_pose_record["rotation"] ).rotation_matrix ego_pose[:3, 3] = np.array(ego_pose_record["translation"]) meta_dict[camera]["ego_pose"].append(ego_pose) current_camera_data_tokens[camera] = current_camera_data["next"] with open(self.meta_file_path, "w") as f: json.dump(meta_dict, f, cls=NumpyEncoder) logger.info(f"[Pixel] Saved camera meta to {self.meta_file_path}") return meta_dict def create_all_filelist(self): # NuScenes dataset is not synchronized, so we need to find the minimum shared # scene length, and only use the frames within the shared scene length. # we also define the start and end timestep within the shared scene length # ---- find the minimum shared scene length ---- # num_timestamps = 100000000 for camera in self.camera_list: if len(self.meta_dict[camera]["timestamp"]) < num_timestamps: num_timestamps = len(self.meta_dict[camera]["timestamp"]) logger.info(f"[Pixel] Min shared scene length: {num_timestamps}") self.scene_total_num_timestamps = num_timestamps if self.end_timestep == -1: self.end_timestep = num_timestamps - 1 else: self.end_timestep = min(self.end_timestep, num_timestamps - 1) # to make sure the last timestep is included self.end_timestep += 1 self.start_timestep = min(self.start_timestep, self.end_timestep - 1) self.scene_fraction = (self.end_timestep - self.start_timestep) / num_timestamps logger.info(f"[Pixel] Start timestep: {self.start_timestep}") logger.info(f"[Pixel] End timestep: {self.end_timestep}") img_filepaths, feat_filepaths, sky_mask_filepaths = [], [], [] # TODO: support dynamic masks for t in range(self.start_timestep, self.end_timestep): for cam_idx in self.camera_list: img_filepath = os.path.join( self.data_path, self.meta_dict[cam_idx]["filepath"][t] ) img_filepaths.append(img_filepath) sky_mask_filepaths.append( img_filepath.replace("samples", "samples_sky_mask") .replace("sweeps", "sweeps_sky_mask") .replace(".jpg", ".png") ) feat_filepaths.append( img_filepath.replace( "samples", f"samples_{self.data_cfg.feature_model_type}" ) .replace("sweeps", f"sweeps_{self.data_cfg.feature_model_type}") .replace(".jpg", ".npy") ) self.img_filepaths = np.array(img_filepaths) self.sky_mask_filepaths = np.array(sky_mask_filepaths) self.feat_filepaths = np.array(feat_filepaths) def load_calibrations(self): # compute per-image poses and intrinsics cam_to_worlds, ego_to_worlds = [], [] intrinsics, timesteps, cam_ids = [], [], [] timestamps = [] # we tranform the camera poses w.r.t. the first timestep to make the origin of # the first ego pose as the origin of the world coordinate system. initial_ego_to_global = self.meta_dict["CAM_FRONT"]["ego_pose"][ self.start_timestep ] global_to_initial_ego = np.linalg.inv(initial_ego_to_global) for t in range(self.start_timestep, self.end_timestep): ego_to_global_current = self.meta_dict["CAM_FRONT"]["ego_pose"][t] # compute ego_to_world transformation ego_to_world = global_to_initial_ego @ ego_to_global_current ego_to_worlds.append(ego_to_world) for cam_name in self.camera_list: cam_to_ego = self.meta_dict[cam_name]["extrinsics"][t] # Because we use opencv coordinate system to generate camera rays, # we need to store the transformation from opencv coordinate system to dataset # coordinate system. However, the nuScenes dataset uses the same coordinate # system as opencv, so we just store the identity matrix. 
# opencv coordinate system: x right, y down, z front cam_to_ego = cam_to_ego @ self.OPENCV2DATASET cam2world = ego_to_world @ cam_to_ego cam_to_worlds.append(cam2world) intrinsics.append(self.meta_dict[cam_name]["intrinsics"][t]) timesteps.append(t) cam_ids.append(self.meta_dict[cam_name]["cam_id"][t]) timestamps.append( self.meta_dict[cam_name]["timestamp"][t] / 1e6 * np.ones_like(self.meta_dict[cam_name]["cam_id"][t]) ) self.intrinsics = torch.from_numpy(np.stack(intrinsics, axis=0)).float() # scale the intrinsics according to the load size self.intrinsics[..., 0, 0] *= ( self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[0][1] ) self.intrinsics[..., 1, 1] *= ( self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[0][0] ) self.intrinsics[..., 0, 2] *= ( self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[0][1] ) self.intrinsics[..., 1, 2] *= ( self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[0][0] ) self.cam_to_worlds = torch.from_numpy(np.stack(cam_to_worlds, axis=0)).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() self.global_to_initial_ego = torch.from_numpy(global_to_initial_ego).float() self.cam_ids = torch.from_numpy(np.stack(cam_ids, axis=0)).long() # the underscore here is important. self._timestamps = torch.tensor(timestamps, dtype=torch.float64) self._timesteps = torch.from_numpy(np.stack(timesteps, axis=0)).long() class NuScenesLiDARSource(SceneLidarSource): def __init__( self, lidar_data_config: OmegaConf, data_path: str, meta_file_path: str, nusc: NuScenes, scene_idx: int, start_timestep: int, fraction: float, # a value in [0, 1] to indicate the fraction of the scene to use global_to_initial_ego: Tensor, ): super().__init__(lidar_data_config) self.data_path = data_path self.meta_file_path = meta_file_path self.nusc = nusc self.scene_idx = scene_idx self.start_timestep = start_timestep # because the lidar data is not synchronized with the image data, we need to # define the end timestep based on the fraction of the scene to use self.fraction = fraction self.global_to_initial_ego = global_to_initial_ego.numpy() self.meta_dict = self.create_or_load_metas() self.create_all_filelist() self.load_data() def create_or_load_metas(self): if os.path.exists(self.meta_file_path): with open(self.meta_file_path, "r") as f: meta_dict = json.load(f) logger.info(f"[Lidar] Loaded lidar meta from {self.meta_file_path}") return meta_dict else: logger.info(f"[Lidar] Creating lidar meta at {self.meta_file_path}") if self.nusc is None: self.nusc = NuScenes( version="v1.0-trainval", dataroot=self.data_path, verbose=True ) self.scene = self.nusc.scene[self.scene_idx] meta_dict = { "timestamp": [], "filepath": [], "extrinsics": [], "ego_pose": [], } # ---- obtain initial pose ---- # first_sample = self.nusc.get("sample", self.scene["first_sample_token"]) current_data_token = first_sample["data"]["LIDAR_TOP"] while current_data_token != "": current_lidar_data = self.nusc.get("sample_data", current_data_token) # ---- timestamp and cam_id ---- # meta_dict["timestamp"].append(current_lidar_data["timestamp"]) meta_dict["filepath"].append(current_lidar_data["filename"]) # ---- extrinsics ---- # calibrated_sensor_record = self.nusc.get( "calibrated_sensor", current_lidar_data["calibrated_sensor_token"] ) extrinsic = np.eye(4) extrinsic[:3, :3] = Quaternion( calibrated_sensor_record["rotation"] ).rotation_matrix extrinsic[:3, 3] = np.array(calibrated_sensor_record["translation"]) meta_dict["extrinsics"].append(extrinsic) # ---- ego pose ---- # ego_pose_record = self.nusc.get( 
"ego_pose", current_lidar_data["ego_pose_token"] ) ego_pose = np.eye(4) ego_pose[:3, :3] = Quaternion(ego_pose_record["rotation"]).rotation_matrix ego_pose[:3, 3] = np.array(ego_pose_record["translation"]) meta_dict["ego_pose"].append(ego_pose) current_data_token = current_lidar_data["next"] with open(self.meta_file_path, "w") as f: json.dump(meta_dict, f, cls=NumpyEncoder) logger.info(f"[Lidar] Saved lidar meta to {self.meta_file_path}") return meta_dict def create_all_filelist(self): # ---- define filepaths ---- # num_timestamps = len(self.meta_dict["timestamp"]) self.end_timestep = int(num_timestamps * self.fraction) self.start_timestep = min(self.start_timestep, self.end_timestep - 1) logger.info(f"[Lidar] Start timestep: {self.start_timestep}") logger.info(f"[Lidar] End timestep: {self.end_timestep}") lidar_filepaths = [] for t in range(self.start_timestep, self.end_timestep): lidar_filepaths.append( os.path.join(self.data_path, self.meta_dict["filepath"][t]) ) self.lidar_filepaths = np.array(lidar_filepaths) def load_calibrations(self): lidar_to_worlds, ego_to_worlds = [], [] # we tranform the poses w.r.t. the first timestep to make the origin of the # first ego pose as the origin of the world coordinate system. for t in range(self.start_timestep, self.end_timestep): lidar_to_ego = np.array(self.meta_dict["extrinsics"][t]) ego_to_global_current = np.array(self.meta_dict["ego_pose"][t]) # compute ego_to_world transformation ego_to_world = self.global_to_initial_ego @ ego_to_global_current ego_to_worlds.append(ego_to_world) lidar_to_worlds.append(ego_to_world @ lidar_to_ego) self.lidar_to_worlds = torch.from_numpy( np.stack(lidar_to_worlds, axis=0) ).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() def load_lidar(self): origins, directions, ranges, timesteps = [], [], [], [] laser_ids = [] timestamps = [] accumulated_num_original_rays = 0 accumulated_num_rays = 0 for t in trange( 0, len(self.lidar_filepaths), desc="Loading lidar", dynamic_ncols=True ): lidar_pc = LidarPointCloud.from_file(self.lidar_filepaths[t]) lidar_pc.remove_close(1.0) pc = lidar_pc.points[:3, :].T pc = np.hstack((pc, np.ones((pc.shape[0], 1)))) pc = torch.from_numpy(pc).float() lidar_points = pc @ self.lidar_to_worlds[t].T lidar_points = lidar_points[:, :3] lidar_origins = ( self.lidar_to_worlds[t][:3, 3] .unsqueeze(0) .repeat(lidar_points.shape[0], 1) ) lidar_directions = lidar_points - lidar_origins lidar_ranges = torch.norm(lidar_directions, dim=-1, keepdim=True) lidar_directions = lidar_directions / lidar_ranges accumulated_num_original_rays += len(lidar_pc.points[0]) valid_mask = torch.ones_like(lidar_origins[:, 0]).bool() if self.data_cfg.truncated_max_range is not None: valid_mask = lidar_points[:, 0] < self.data_cfg.truncated_max_range if self.data_cfg.truncated_min_range is not None: valid_mask = valid_mask & ( lidar_points[:, 0] > self.data_cfg.truncated_min_range ) lidar_origins = lidar_origins[valid_mask] lidar_directions = lidar_directions[valid_mask] lidar_ranges = lidar_ranges[valid_mask] lidar_timestep = torch.ones_like(lidar_ranges).squeeze(-1) * t lidar_ids = torch.zeros_like(lidar_origins[:, 0]).long() accumulated_num_rays += len(lidar_ranges) origins.append(lidar_origins) directions.append(lidar_directions) ranges.append(lidar_ranges) timesteps.append(lidar_timestep) laser_ids.append(lidar_ids) timestamps.append( self.meta_dict["timestamp"][t] / 1e6 * torch.ones_like(lidar_ids, dtype=torch.float64) ) logger.info( f"[Lidar] Number of lidar rays: 
{accumulated_num_rays} " f"({accumulated_num_rays / accumulated_num_original_rays * 100:.2f}%) of " f"{accumulated_num_original_rays} original rays)" ) logger.info("[Lidar] Filter condition:") logger.info(f" only_use_top_lidar: {self.data_cfg.only_use_top_lidar}") logger.info(f" truncated_max_range: {self.data_cfg.truncated_max_range}") logger.info(f" truncated_min_range: {self.data_cfg.truncated_min_range}") self.origins = torch.cat(origins, dim=0) self.directions = torch.cat(directions, dim=0) self.ranges = torch.cat(ranges, dim=0) self._timesteps = torch.cat(timesteps, dim=0) self.laser_ids = torch.cat(laser_ids, dim=0) self._timestamps = torch.cat(timestamps, dim=0) def sample_uniform_rays( self, num_rays: int, candidate_indices: Tensor = None, ): # in nuscenes, we don't support novel view synthesis yet, so we don't need to # use candidate indices self.cached_origins = self.origins self.cached_directions = self.directions self.cached_ranges = self.ranges self.cached_normalized_timestamps = self.normalized_timestamps return torch.randint( 0, len(self.cached_origins), size=(num_rays,), device=self.device, ) class NuScenesDataset(SceneDataset): dataset: str = "nuscenes" def __init__( self, data_cfg: OmegaConf, ) -> None: super().__init__(data_cfg) assert self.data_cfg.dataset == "nuscenes" self.data_path = self.data_cfg.data_root self.processed_data_path = os.path.join( self.data_path, "emernerf_metas", f"{self.scene_idx:03d}" ) if not os.path.exists(self.processed_data_path): os.makedirs(self.processed_data_path) self.img_meta_file_path = os.path.join( self.processed_data_path, "img_meta.json" ) self.lidar_meta_file_path = os.path.join( self.processed_data_path, "lidar_meta.json" ) # ---- create pixel source ---- # self.pixel_source, self.lidar_source = self.build_data_source() self.aabb = self.get_aabb() # ---- define train and test indices ---- # ( self.train_timesteps, self.test_timesteps, self.train_indices, self.test_indices, ) = self.split_train_test() # ---- create split wrappers ---- # pixel_sets, lidar_sets = self.build_split_wrapper() self.train_pixel_set, self.test_pixel_set, self.full_pixel_set = pixel_sets self.train_lidar_set, self.test_lidar_set, self.full_lidar_set = lidar_sets def build_split_wrapper(self): """ Makes each data source as a Pytorch Dataset """ train_pixel_set, test_pixel_set, full_pixel_set = None, None, None train_lidar_set, test_lidar_set, full_lidar_set = None, None, None assert ( len(self.test_indices) == 0 ), "Test split is not supported yet for nuscenes" # ---- create split wrappers ---- # if self.pixel_source is not None: train_pixel_set = SplitWrapper( datasource=self.pixel_source, # train_indices are img indices, so the length is num_cams * num_train_timesteps split_indices=self.train_indices, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_pixel_set = SplitWrapper( datasource=self.pixel_source, # cover all the images split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # the number of image timesteps is different from the number of lidar timesteps # TODO: find a better way to handle this # currently use all the lidar timesteps for training split_indices=np.arange(self.lidar_source.num_timesteps), split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans 
split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): pixel_source, lidar_source = None, None all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = NuScenesPixelSource( pixel_data_config=self.data_cfg.pixel_source, data_path=self.data_path, scene_idx=self.scene_idx, meta_file_path=self.img_meta_file_path, start_timestep=self.data_cfg.start_timestep, end_timestep=self.data_cfg.end_timestep, ) pixel_source.to(self.device) all_timestamps.append(pixel_source.timestamps) self.start_timestep = pixel_source.start_timestep self.end_timestep = pixel_source.end_timestep self.scene_fraction = pixel_source.scene_fraction # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = NuScenesLiDARSource( lidar_data_config=self.data_cfg.lidar_source, data_path=self.data_path, meta_file_path=self.lidar_meta_file_path, nusc=pixel_source.nusc if pixel_source is not None else None, scene_idx=self.scene_idx, start_timestep=self.start_timestep, fraction=self.scene_fraction, global_to_initial_ego=pixel_source.global_to_initial_ego, ) lidar_source.to(self.device) all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) all_timestamps = all_timestamps.float() if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): assert ( self.data_cfg.pixel_source.test_image_stride == 0 ), "test_image_stride > 0 is not supported for nuscenes dataset. " if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], [] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") return train_timesteps, test_timesteps, train_indices, test_indices
def save_videos(self, video_dict, **kwargs):
5
2023-10-11 20:56:27+00:00
24k
alibaba-damo-academy/FunCodec
funcodec/models/encoder/transformer_encoder.py
[ { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Tensor,\n ilens: torch.Tensor,\n prev_states: torch.Tensor = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n raise NotImplementedError" }, { "identifier": "MultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(self, query, key, value):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, mask):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): 
Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask)" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\n time1 means the length of query vector.\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)[\n :, :, :, : x.size(-1) // 2 + 1\n ] # only keep the positions from 0 to time2\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)), device=x.device)\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, 2*time1-1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, 2*time1-1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)" }, { "identifier": "LayerNorm", "path": "funcodec/modules/layer_norm.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\n \"\"\"Layer normalization module.\n\n Args:\n nout (int): Output dim size.\n dim (int): Dimension to be normalized.\n\n \"\"\"\n\n def __init__(self, nout, dim=-1):\n \"\"\"Construct an LayerNorm object.\"\"\"\n super(LayerNorm, self).__init__(nout, eps=1e-12)\n self.dim = dim\n\n def forward(self, x):\n \"\"\"Apply layer normalization.\n\n Args:\n x (torch.Tensor): Input tensor.\n\n Returns:\n torch.Tensor: Normalized tensor.\n\n \"\"\"\n if self.dim == -1:\n return super(LayerNorm, self).forward(x)\n return (\n super(LayerNorm, self)\n .forward(x.transpose(self.dim, -1))\n .transpose(self.dim, -1)\n )" }, { "identifier": "Conv1dLinear", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\n \"\"\"Conv1D + Linear for Transformer block.\n\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize Conv1dLinear module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(Conv1dLinear, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x))" }, { "identifier": "MultiLayeredConv1d", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\n \"\"\"Multi-layered conv1d for Transformer block.\n\n This is a module of 
multi-leyered conv1d designed\n to replace positionwise feed-forward network\n in Transforner block, which is introduced in\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\n\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\n https://arxiv.org/pdf/1905.09263.pdf\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize MultiLayeredConv1d module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(MultiLayeredConv1d, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Conv1d(\n hidden_chans,\n in_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)" }, { "identifier": "make_pad_mask", "path": "funcodec/modules/nets_utils.py", "snippet": "def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):\n \"\"\"Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 
0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n \"\"\"\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if maxlen is None:\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n else:\n assert xs is None\n assert maxlen >= int(max(lengths))\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask" }, { "identifier": "PositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n reverse (bool): Whether to reverse the input position. Only for\n the class LegacyRelPositionalEncoding. We remove it in the current\n class RelPositionalEncoding.\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super(PositionalEncoding, self).__init__()\n self.d_model = d_model\n self.reverse = reverse\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\n self._register_load_state_dict_pre_hook(_pre_hook)\n\n def extend_pe(self, x):\n \"\"\"Reset the positional encodings.\"\"\"\n if self.pe is not None:\n if self.pe.size(1) >= x.size(1):\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n pe = torch.zeros(x.size(1), self.d_model)\n if self.reverse:\n position = torch.arange(\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\n ).unsqueeze(1)\n else:\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.pe = pe.to(device=x.device, dtype=x.dtype)\n\n def forward(self, x: torch.Tensor):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale + self.pe[:, : x.size(1)]\n return self.dropout(x)" }, { "identifier": "ScaledPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\n \"\"\"Scaled positional encoding module.\n\n See Sec. 
3.2 https://arxiv.org/abs/1809.08895\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\"\"\"\n self.alpha.data = torch.tensor(1.0)\n\n def forward(self, x):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x + self.alpha * self.pe[:, : x.size(1)]\n return self.dropout(x)" }, { "identifier": "RelPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\n \"\"\"Relative positional encoding module (new implementation).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n See : Appendix B in https://arxiv.org/abs/1901.02860\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super(RelPositionalEncoding, self).__init__()\n self.d_model = d_model\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\n\n def extend_pe(self, x):\n \"\"\"Reset the positional encodings.\"\"\"\n if self.pe is not None:\n # self.pe contains both positive and negative parts\n # the length of self.pe is 2 * input_len - 1\n if self.pe.size(1) >= x.size(1) * 2 - 1:\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n # Suppose `i` means to the position of query vecotr and `j` means the\n # position of key vector. We use position relative positions when keys\n # are to the left (i>j) and negative relative positions otherwise (i<j).\n pe_positive = torch.zeros(x.size(1), self.d_model)\n pe_negative = torch.zeros(x.size(1), self.d_model)\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n pe_positive[:, 0::2] = torch.sin(position * div_term)\n pe_positive[:, 1::2] = torch.cos(position * div_term)\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\n\n # Reserve the order of positive indices and concat both positive and\n # negative indices. 
This is used to support the shifting trick\n # as in https://arxiv.org/abs/1901.02860\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\n pe_negative = pe_negative[1:].unsqueeze(0)\n pe = torch.cat([pe_positive, pe_negative], dim=1)\n self.pe = pe.to(device=x.device, dtype=x.dtype)\n\n def forward(self, x: torch.Tensor):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale\n pos_emb = self.pe[\n :,\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\n ]\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "LegacyRelPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module (old version).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n See : Appendix B in https://arxiv.org/abs/1901.02860\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(\n d_model=d_model,\n dropout_rate=dropout_rate,\n max_len=max_len,\n reverse=True,\n )\n\n def forward(self, x):\n \"\"\"Compute positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale\n pos_emb = self.pe[:, : x.size(1)]\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "PositionwiseFeedForward", "path": "funcodec/modules/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.activation = activation\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n return self.w_2(self.dropout(self.activation(self.w_1(x))))" }, { "identifier": "repeat", "path": "funcodec/modules/repeat.py", "snippet": "def repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])" }, { "identifier": "rename_state_dict", "path": "funcodec/modules/nets_utils.py", "snippet": "def rename_state_dict(\n old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]\n):\n \"\"\"Replace keys of old prefix with new prefix in state dict.\"\"\"\n # need this list not to break the dict iterator\n old_keys = [k for k in state_dict if k.startswith(old_prefix)]\n if len(old_keys) > 0:\n logging.warning(f\"Rename: {old_prefix} -> {new_prefix}\")\n for k in old_keys:\n v = state_dict.pop(k)\n new_k = k.replace(old_prefix, new_prefix)\n state_dict[new_k] = v" }, { "identifier": 
"DynamicConvolution", "path": "funcodec/modules/dynamic_conv.py", "snippet": "class DynamicConvolution(nn.Module):\n \"\"\"Dynamic Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Dynamic Convolution layer.\"\"\"\n super(DynamicConvolution, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.attn = None\n\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat, n_feat)\n self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)\n nn.init.xavier_uniform(self.linear_weight.weight)\n self.act = nn.GLU()\n\n # dynamic conv related\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Dynamic Convolution'.\n\n This function takes query, key and value but uses only quert.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n k = self.kernel_size\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # get kernel of convolution\n weight = self.linear_weight(x) # B x T x kH\n weight = F.dropout(weight, self.dropout_rate, training=self.training)\n weight = weight.view(B, T, H, k).transpose(1, 2).contiguous() # B x H x T x k\n weight_new = torch.zeros(B * H * T * (T + k - 1), dtype=weight.dtype)\n weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float(\"-inf\"))\n weight_new = weight_new.to(x.device) # B x H x T x T+k-1\n weight_new.as_strided(\n (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)\n ).copy_(weight)\n weight_new = weight_new.narrow(-1, int((k - 1) / 2), T) # B x H x T x T(k)\n if self.use_kernel_mask:\n kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)\n weight_new = weight_new.masked_fill(kernel_mask == 0.0, float(\"-inf\"))\n weight_new = F.softmax(weight_new, dim=-1)\n self.attn = weight_new\n weight_new = weight_new.view(B * H, T, T)\n\n # convolution\n x = x.transpose(1, 2).contiguous() # B x C x T\n x = x.view(B * H, int(C / H), T).transpose(1, 2)\n x = torch.bmm(weight_new, x) # BH x T x C/H\n x = x.transpose(1, 2).contiguous().view(B, C, T)\n\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { 
"identifier": "DynamicConvolution2D", "path": "funcodec/modules/dynamic_conv2d.py", "snippet": "class DynamicConvolution2D(nn.Module):\n \"\"\"Dynamic 2-Dimensional Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Dynamic 2-Dimensional Convolution layer.\"\"\"\n super(DynamicConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n self.attn_t = None\n self.attn_f = None\n\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)\n nn.init.xavier_uniform(self.linear_weight.weight)\n self.linear_weight_f = nn.Linear(n_feat, kernel_size)\n nn.init.xavier_uniform(self.linear_weight_f.weight)\n self.act = nn.GLU()\n\n # dynamic conv related\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Dynamic 2-Dimensional Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n k = self.kernel_size\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution of frequency axis\n weight_f = self.linear_weight_f(x).view(B * T, 1, k) # B x T x k\n self.attn_f = weight_f.view(B, T, k).unsqueeze(1)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_f, padding=self.padding_size, groups=B * T\n )\n xf = xf.view(B, T, C)\n\n # get kernel of convolution\n weight = self.linear_weight(x) # B x T x kH\n weight = F.dropout(weight, self.dropout_rate, training=self.training)\n weight = weight.view(B, T, H, k).transpose(1, 2).contiguous() # B x H x T x k\n weight_new = torch.zeros(B * H * T * (T + k - 1), dtype=weight.dtype)\n weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float(\"-inf\"))\n weight_new = weight_new.to(x.device) # B x H x T x T+k-1\n weight_new.as_strided(\n (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)\n ).copy_(weight)\n weight_new = weight_new.narrow(-1, int((k - 1) / 2), T) # B x H x T x T(k)\n if self.use_kernel_mask:\n kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)\n weight_new = weight_new.masked_fill(kernel_mask == 0.0, float(\"-inf\"))\n weight_new = F.softmax(weight_new, dim=-1)\n self.attn_t = weight_new\n weight_new = weight_new.view(B 
* H, T, T)\n\n # convolution\n x = x.transpose(1, 2).contiguous() # B x C x T\n x = x.view(B * H, int(C / H), T).transpose(1, 2)\n x = torch.bmm(weight_new, x)\n x = x.transpose(1, 2).contiguous().view(B, C, T)\n\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { "identifier": "LightweightConvolution", "path": "funcodec/modules/lightconv.py", "snippet": "class LightweightConvolution(nn.Module):\n \"\"\"Lightweight Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Lightweight Convolution layer.\"\"\"\n super(LightweightConvolution, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Lightweight Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = 
self.linear2(x)\n return x" }, { "identifier": "LightweightConvolution2D", "path": "funcodec/modules/lightconv2d.py", "snippet": "class LightweightConvolution2D(nn.Module):\n \"\"\"Lightweight 2-Dimensional Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Lightweight 2-Dimensional Convolution layer.\"\"\"\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Lightweight 2-Dimensional Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution along frequency axis\n weight_f = F.softmax(self.weight_f, dim=-1)\n weight_f = F.dropout(weight_f, self.dropout_rate, training=self.training)\n weight_new = torch.zeros(\n B * T, 1, self.kernel_size, device=x.device, dtype=x.dtype\n ).copy_(weight_f)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_new, padding=self.padding_size, groups=B * T\n ).view(B, T, C)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask 
== 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { "identifier": "Conv2dSubsampling", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\n super(Conv2dSubsampling, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": "Conv2dSubsampling2", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling2(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/2 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling2 object.\"\"\"\n super(Conv2dSubsampling2, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 1),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2)), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 2.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 2.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:1]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": 
"Conv2dSubsampling6", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling6(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super(Conv2dSubsampling6, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-4:3]" }, { "identifier": "Conv2dSubsampling8", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling8(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super(Conv2dSubsampling8, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]" }, { "identifier": "TooShortUttError", "path": "funcodec/modules/subsampling.py", "snippet": "class TooShortUttError(Exception):\n \"\"\"Raised when the utt is too short for subsampling.\n\n Args:\n message (str): Message for error catch\n actual_size (int): the short size that cannot pass the subsampling\n limit (int): the limit size for subsampling\n\n \"\"\"\n\n def __init__(self, message, actual_size, limit):\n \"\"\"Construct a TooShortUttError for error handler.\"\"\"\n super().__init__(message)\n self.actual_size = actual_size\n self.limit = limit" }, { "identifier": 
"check_short_utt", "path": "funcodec/modules/subsampling.py", "snippet": "def check_short_utt(ins, size):\n \"\"\"Check if the utterance is too short for subsampling.\"\"\"\n if isinstance(ins, Conv2dSubsampling2) and size < 3:\n return True, 3\n if isinstance(ins, Conv2dSubsampling) and size < 7:\n return True, 7\n if isinstance(ins, Conv2dSubsampling6) and size < 11:\n return True, 11\n if isinstance(ins, Conv2dSubsampling8) and size < 15:\n return True, 15\n return False, -1" } ]
from typing import List from typing import Optional from typing import Tuple from torch import nn from funcodec.models.encoder.abs_encoder import AbsEncoder from funcodec.modules.attention import ( MultiHeadedAttention, RelPositionMultiHeadedAttention, # noqa: H301 LegacyRelPositionMultiHeadedAttention, # noqa: H301 ) from funcodec.modules.layer_norm import LayerNorm from funcodec.modules.multi_layer_conv import Conv1dLinear from funcodec.modules.multi_layer_conv import MultiLayeredConv1d from funcodec.modules.nets_utils import make_pad_mask from funcodec.modules.embedding import ( PositionalEncoding, # noqa: H301 ScaledPositionalEncoding, # noqa: H301 RelPositionalEncoding, # noqa: H301 LegacyRelPositionalEncoding, # noqa: H301 ) from funcodec.modules.positionwise_feed_forward import ( PositionwiseFeedForward, # noqa: H301 ) from funcodec.modules.repeat import repeat from funcodec.modules.nets_utils import rename_state_dict from funcodec.modules.dynamic_conv import DynamicConvolution from funcodec.modules.dynamic_conv2d import DynamicConvolution2D from funcodec.modules.lightconv import LightweightConvolution from funcodec.modules.lightconv2d import LightweightConvolution2D from funcodec.modules.subsampling import Conv2dSubsampling from funcodec.modules.subsampling import Conv2dSubsampling2 from funcodec.modules.subsampling import Conv2dSubsampling6 from funcodec.modules.subsampling import Conv2dSubsampling8 from funcodec.modules.subsampling import TooShortUttError from funcodec.modules.subsampling import check_short_utt import torch import logging
18117
skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6":
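The cropped code above implements stochastic depth in `EncoderLayer.forward`: at training time the layer is skipped with probability `stochastic_depth_rate`, and when it does run, the residual branch is rescaled by `1 / (1 - p)` so its expected contribution is unchanged. A toy, self-contained illustration of that rule (the helper name is illustrative and not part of funcodec):

```python
import torch

def stochastic_depth_residual(x, f, p, training):
    """x + f(x), but at training time skip f with prob p, else scale by 1/(1-p)."""
    if training and p > 0:
        if torch.rand(1).item() < p:          # drop the layer entirely
            return x
        return x + (1.0 / (1.0 - p)) * f(x)   # rescaled residual branch
    return x + f(x)                           # eval: plain residual connection

layer = torch.nn.Linear(256, 256)             # stand-in for attention / feed-forward
x = torch.randn(2, 10, 256)
print(stochastic_depth_residual(x, layer, p=0.1, training=True).shape)
# torch.Size([2, 10, 256])
```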
# Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Transformer encoder definition.""" class EncoderLayer(nn.Module): """Encoder layer module. Args: size (int): Input dimension. self_attn (torch.nn.Module): Self-attention module instance. `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance can be used as the argument. feed_forward (torch.nn.Module): Feed-forward module instance. `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument. dropout_rate (float): Dropout rate. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) stochastic_depth_rate (float): Proability to skip this layer. During training, the layer may skip residual computation and return input as-is with given probability. """ def __init__( self, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate def forward(self, x, mask, cache=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ if isinstance(x, tuple): x, pos_emb = x[0], x[1] else: x, pos_emb = x, None skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. 
Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6":
self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
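For context, `next_line` completes the `conv2d6` branch of the constructor shown above, and the other `input_layer` options map onto the subsampling modules pulled in by the import statement. A compact sketch of that mapping, assuming funcodec is installed; the lookup dict and helper are illustrative rather than funcodec API (the constructor itself uses an if/elif chain), and the `"conv2d8"` key is assumed by analogy with the imported `Conv2dSubsampling8` since that branch is not visible in the crop:

```python
from funcodec.modules.subsampling import (
    Conv2dSubsampling,    # time -> ~1/4
    Conv2dSubsampling2,   # time -> ~1/2
    Conv2dSubsampling6,   # time -> ~1/6
    Conv2dSubsampling8,   # time -> ~1/8
)

# Illustrative lookup mirroring TransformerEncoder.__init__ (not funcodec API).
_SUBSAMPLERS = {
    "conv2d": Conv2dSubsampling,
    "conv2d2": Conv2dSubsampling2,
    "conv2d6": Conv2dSubsampling6,
    "conv2d8": Conv2dSubsampling8,   # assumed branch, not shown in the crop
}

def build_embed(input_layer, input_size, output_size, dropout_rate):
    return _SUBSAMPLERS[input_layer](input_size, output_size, dropout_rate)

embed = build_embed("conv2d6", 80, 256, 0.1)
```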
21
2023-10-07 02:00:40+00:00
24k
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2_DDP.py
[ { "identifier": "nnUNetTrainerV2", "path": "nn_transunet/trainer/nnUNetTrainerV2.py", "snippet": "class nnUNetTrainerV2(nnUNetTrainer):\n \"\"\"\n Info for Fabian: same as internal nnUNetTrainerV2_2\n \"\"\"\n\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None):\n super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,\n deterministic, fp16)\n if args is not None: \n self.input_size=input_size\n self.model = args.model\n self.resume = args.resume\n self.disable_ds=args.disable_ds\n self.max_num_epochs = args.max_num_epochs # set 1 gpu training\n self.initial_lr = args.initial_lr # 0.01\n self.args = args\n \n if self.disable_ds:\n print(\"disable_ds\")\n # print(\"not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version\")\n # raise NotImplementedError\n else:\n print(\"runnning DDP, inheriting nnUNetTrainerV2\")\n \n self.save_every = 1 # prev 50\n # self.max_num_epochs = 1000\n # self.initial_lr = 1e-2\n self.deep_supervision_scales = None\n self.ds_loss_weights = None\n\n self.pin_memory = True\n\n def initialize(self, training=True, force_load_plans=False):\n \"\"\"\n - replaced get_default_augmentation with get_moreDA_augmentation\n - enforce to only run this code once\n - loss function wrapper for deep supervision\n\n :param training:\n :param force_load_plans:\n :return:\n \"\"\"\n if not self.was_initialized:\n maybe_mkdir_p(self.output_folder)\n\n if force_load_plans or (self.plans is None):\n self.load_plans_file()\n\n self.process_plans(self.plans)\n\n self.setup_DA_params()\n\n ################# Here we wrap the loss for deep supervision ############\n # we need to know the number of outputs of the network\n net_numpool = len(self.net_num_pool_op_kernel_sizes)\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(net_numpool)])\n\n # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1\n mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])\n weights[~mask] = 0\n weights = weights / weights.sum()\n self.ds_loss_weights = weights\n if self.disable_ds:\n \n self.ds_loss_weights[0]=1\n self.ds_loss_weights[1:]=0\n from loss_functions import DC_and_CE_loss\n self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})\n\n else:\n # now wrap the loss\n self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)\n ################# END ###################\n\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % self.stage)\n if training:\n self.dl_tr, self.dl_val = self.get_basic_generators()\n if self.unpack_data:\n print(\"unpacking dataset\")\n unpack_dataset(self.folder_with_preprocessed_data)\n print(\"done\")\n else:\n print(\n \"INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you \"\n \"will wait all winter for your model to finish!\")\n\n self.tr_gen, self.val_gen = get_moreDA_augmentation(\n self.dl_tr, self.dl_val,\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params,\n deep_supervision_scales=self.deep_supervision_scales,\n pin_memory=self.pin_memory,\n use_nondetMultiThreadedAugmenter=False\n )\n self.print_to_log_file(\"TRAINING KEYS:\\n %s\" % (str(self.dataset_tr.keys())),\n also_print_to_console=False)\n self.print_to_log_file(\"VALIDATION KEYS:\\n %s\" % (str(self.dataset_val.keys())),\n also_print_to_console=False)\n else:\n pass\n\n self.initialize_network()\n self.initialize_optimizer_and_scheduler()\n\n # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))\n else:\n self.print_to_log_file('self.was_initialized is True, not running self.initialize again')\n self.was_initialized = True\n\n\n def initialize_network(self):\n \"\"\"\n - momentum 0.99\n - SGD instead of Adam\n - self.lr_scheduler = None because we do poly_lr\n - deep supervision = True\n - i am sure I forgot something here\n\n Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though\n :return:\n \"\"\"\n\n # model_list = {'Generic_UNet': Generic_UNet}\n\n if self.model.startswith(\"Generic\"):\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n do_ds = not self.disable_ds\n if not do_ds: print(\"disable ds\")\n if self.model == 'Generic_TransUNet_max_ppbp':\n from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp\n self.network = Generic_TransUNet_max_ppbp(self.num_input_channels, self.base_num_features, self.num_classes,\n len(self.net_num_pool_op_kernel_sizes),\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, do_ds, False, lambda x: x, InitWeights_He(1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, \n convolutional_upsampling= False if ('is_fam' in self.model_params.keys() and self.model_params['is_fam']) else True, # default True,\n patch_size=self.args.crop_size, \n **self.model_params)\n else:\n raise NotImplementedError\n \n if torch.cuda.is_available():\n self.network.cuda()\n self.network.inference_apply_nonlin = softmax_helper\n\n else:\n raise NotImplementedError\n\n\n def initialize_optimizer_and_scheduler(self):\n assert self.network is not None, \"self.initialize_network must be called first\"\n self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n self.lr_scheduler = None\n\n def run_online_evaluation(self, output, target):\n \"\"\"\n due to deep supervision the return value and the reference are now lists of tensors. We only need the full\n resolution output because this is what we are interested in in the end. 
The others are ignored\n :param output:\n :param target:\n :return:\n \"\"\"\n target = target[0]\n output = output[0]\n return super().run_online_evaluation(output, target)\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,\n step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n \"\"\"\n We need to wrap this because we need to enforce self.network.do_ds = False for prediction\n \"\"\"\n ds = self.network.do_ds\n self.network.do_ds = False\n ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,\n save_softmax=save_softmax, use_gaussian=use_gaussian,\n overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,\n all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs,\n run_postprocessing_on_folds=run_postprocessing_on_folds)\n\n self.network.do_ds = ds\n return ret\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n We need to wrap this because we need to enforce self.network.do_ds = False for prediction\n \"\"\"\n ds = self.network.do_ds\n self.network.do_ds = False\n ret = super().predict_preprocessed_data_return_seg_and_softmax(data,\n do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size, use_gaussian=use_gaussian,\n pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,\n verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.do_ds = ds\n return ret\n\n def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):\n \"\"\"\n gradient clipping improves training stability\n\n :param data_generator:\n :param do_backprop:\n :param run_online_evaluation:\n :return:\n \"\"\"\n data_dict = next(data_generator)\n data = data_dict['data']\n target = data_dict['target']\n\n data = maybe_to_torch(data)\n target = maybe_to_torch(target)\n\n if torch.cuda.is_available():\n data = to_cuda(data)\n target = to_cuda(target)\n\n self.optimizer.zero_grad()\n\n if self.fp16:\n with autocast():\n output = self.network(data)\n del data\n if self.disable_ds:\n if isinstance(output, tuple) or isinstance(output, list):\n output = output[0]\n if isinstance(target, tuple) or isinstance(target, list):\n target = target[0]\n l = self.loss(output, target)\n\n if do_backprop:\n self.amp_grad_scaler.scale(l).backward()\n self.amp_grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.amp_grad_scaler.step(self.optimizer)\n self.amp_grad_scaler.update()\n else:\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n if do_backprop:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n\n if run_online_evaluation:\n if self.disable_ds:\n output = output.unsqueeze(0)\n target = target.unsqueeze(0)\n self.run_online_evaluation(output, target)\n\n del target\n\n return 
l.detach().cpu().numpy()\n\n def do_split(self):\n \"\"\"\n The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,\n so always the same) and save it as splits_final.pkl file in the preprocessed data directory.\n Sometimes you may want to create your own split for various reasons. For this you will need to create your own\n splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in\n it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)\n and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to\n use a random 80:20 data split.\n :return:\n \"\"\"\n if isinstance(self.fold, str) and self.fold.startswith(\"all\"):\n # achtung!\n if self.fold == \"all\":\n tr_keys = val_keys = list(self.dataset.keys())\n elif self.fold.find(\"tr\") != -1:\n # np.sort(list(self.dataset.keys()))\n np.random.seed(12345)\n all_keys = list(self.dataset.keys())\n np.random.shuffle(all_keys)\n proportion = float(self.fold.split(\"tr\")[-1])\n assert proportion < 1.0\n cur_num = int(len(all_keys) * proportion)\n tr_keys = val_keys = all_keys[:cur_num]\n\n else:\n splits_file = join(self.dataset_directory, \"splits_final.pkl\")\n # if the split file does not exist we need to create it\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new 5-fold cross-validation split...\")\n splits = []\n all_keys_sorted = np.sort(list(self.dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append(OrderedDict())\n splits[-1]['train'] = train_keys\n splits[-1]['val'] = test_keys\n save_pickle(splits, splits_file)\n\n else:\n self.print_to_log_file(\"Using splits from existing split file:\", splits_file)\n splits = load_pickle(splits_file)\n self.print_to_log_file(\"The split file contains %d splits.\" % len(splits))\n\n self.print_to_log_file(\"Desired fold for training: %d\" % self.fold)\n if self.fold < len(splits):\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n self.print_to_log_file(\"This split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n else:\n self.print_to_log_file(\"INFO: You requested fold %d for training but splits \"\n \"contain only %d folds. 
I am now creating a \"\n \"random (but seeded) 80:20 split!\" % (self.fold, len(splits)))\n # if we request a fold that is not in the split file, create a random 80:20 split\n rnd = np.random.RandomState(seed=12345 + self.fold)\n keys = np.sort(list(self.dataset.keys()))\n idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)\n idx_val = [i for i in range(len(keys)) if i not in idx_tr]\n tr_keys = [keys[i] for i in idx_tr]\n val_keys = [keys[i] for i in idx_val]\n self.print_to_log_file(\"This random 80:20 split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n\n tr_keys.sort()\n val_keys.sort()\n self.dataset_tr = OrderedDict()\n for i in tr_keys:\n self.dataset_tr[i] = self.dataset[i]\n self.dataset_val = OrderedDict()\n for i in val_keys:\n self.dataset_val[i] = self.dataset[i]\n\n def setup_DA_params(self):\n \"\"\"\n - we increase roation angle from [-15, 15] to [-30, 30]\n - scale range is now (0.7, 1.4), was (0.85, 1.25)\n - we don't do elastic deformation anymore\n\n :return:\n \"\"\"\n\n self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(\n np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]\n\n if self.threeD:\n self.data_aug_params = default_3D_augmentation_params\n self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)\n self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)\n self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)\n if self.do_dummy_2D_aug:\n self.data_aug_params[\"dummy_2D\"] = True\n self.print_to_log_file(\"Using dummy2d data augmentation\")\n self.data_aug_params[\"elastic_deform_alpha\"] = \\\n default_2D_augmentation_params[\"elastic_deform_alpha\"]\n self.data_aug_params[\"elastic_deform_sigma\"] = \\\n default_2D_augmentation_params[\"elastic_deform_sigma\"]\n self.data_aug_params[\"rotation_x\"] = default_2D_augmentation_params[\"rotation_x\"]\n else:\n self.do_dummy_2D_aug = False\n if max(self.patch_size) / min(self.patch_size) > 1.5:\n default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)\n self.data_aug_params = default_2D_augmentation_params\n self.data_aug_params[\"mask_was_used_for_normalization\"] = self.use_mask_for_norm\n\n if self.do_dummy_2D_aug:\n self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],\n self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))\n else:\n self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n\n self.data_aug_params[\"scale_range\"] = (0.7, 1.4)\n self.data_aug_params[\"do_elastic\"] = False\n self.data_aug_params['selected_seg_channels'] = [0]\n self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size\n\n self.data_aug_params[\"num_cached_per_thread\"] = 2\n\n def maybe_update_lr(self, epoch=None):\n \"\"\"\n if epoch is not None we overwrite epoch. 
Else we use epoch = self.epoch + 1\n\n (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.\n herefore we need to do +1 here)\n\n :param epoch:\n :return:\n \"\"\"\n if epoch is None:\n ep = self.epoch + 1\n else:\n ep = epoch\n self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)\n self.print_to_log_file(\"lr:\", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))\n\n def on_epoch_end(self):\n \"\"\"\n overwrite patient-based early stopping. Always run to 1000 epochs\n :return:\n \"\"\"\n super().on_epoch_end()\n continue_training = self.epoch < self.max_num_epochs\n\n # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the\n # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95\n if self.epoch == 100:\n if self.all_val_eval_metrics[-1] == 0:\n self.optimizer.param_groups[0][\"momentum\"] = 0.95\n self.network.apply(InitWeights_He(1e-2))\n self.print_to_log_file(\"At epoch 100, the mean foreground Dice was 0. This can be caused by a too \"\n \"high momentum. High momentum (0.99) is good for datasets where it works, but \"\n \"sometimes causes issues such as this one. Momentum has now been reduced to \"\n \"0.95 and network weights have been reinitialized\")\n return continue_training\n\n def run_training(self):\n \"\"\"\n if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first\n continued epoch with self.initial_lr\n\n we also need to make sure deep supervision in the network is enabled for training, thus the wrapper\n :return:\n \"\"\"\n self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we\n # want at the start of the training\n ds = self.network.do_ds\n if not self.disable_ds:\n self.network.do_ds = True\n ret = super().run_training()\n self.network.do_ds = ds\n\n\n\n return ret" }, { "identifier": "InitWeights_He", "path": "nn_transunet/trainer/nnUNetTrainerV2.py", "snippet": "class InitWeights_He(object):\n def __init__(self, neg_slope=1e-2):\n self.neg_slope = neg_slope\n\n def __call__(self, module):\n if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):\n module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)\n if module.bias is not None:\n module.bias = nn.init.constant_(module.bias, 0)" }, { "identifier": "get_moreDA_augmentation", "path": "nn_transunet/data/data_augmentation_moreDA.py", "snippet": "def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1,\n seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,\n soft_ds=False,\n classes=None, pin_memory=True, regions=None,\n use_nondetMultiThreadedAugmenter: bool = False,\n is_spatial_aug_only=False, reclip=None):\n\n # default_3D_augmentation_params: {'selected_data_channels': None, 'selected_seg_channels': [0], 'do_elastic': False, 'elastic_deform_alpha': (0.0, 900.0), 'elastic_deform_sigma': (9.0, 13.0), 'p_eldef': 0.2, 'do_scaling': True, 'scale_range': (0.7, 1.4), 'independent_scale_factor_for_each_axis': False, 'p_independent_scale_per_axis': 1, 'p_scale': 0.2, 'do_rotation': True, 'rotation_x': (-0.5235987755982988, 0.5235987755982988), 'rotation_y': (-0.5235987755982988, 0.5235987755982988), 'rotation_z': (-0.5235987755982988, 
0.5235987755982988), 'rotation_p_per_axis': 1, 'p_rot': 0.2, 'random_crop': False, 'random_crop_dist_to_border': None, 'do_gamma': True, 'gamma_retain_stats': True, 'gamma_range': (0.7, 1.5), 'p_gamma': 0.3, 'do_mirror': True, 'mirror_axes': (0, 1, 2), 'dummy_2D': False, 'mask_was_used_for_normalization': OrderedDict([(0, False)]), 'border_mode_data': 'constant', 'all_segmentation_labels': None, 'move_last_seg_chanel_to_data': False, 'cascade_do_cascade_augmentations': False, 'cascade_random_binary_transform_p': 0.4, 'cascade_random_binary_transform_p_per_label': 1, 'cascade_random_binary_transform_size': (1, 8), 'cascade_remove_conn_comp_p': 0.2, 'cascade_remove_conn_comp_max_size_percent_threshold': 0.15, 'cascade_remove_conn_comp_fill_with_other_class_p': 0.0, 'do_additive_brightness': False, 'additive_brightness_p_per_sample': 0.15, 'additive_brightness_p_per_channel': 0.5, 'additive_brightness_mu': 0.0, 'additive_brightness_sigma': 0.1, 'num_threads': 12, 'num_cached_per_thread': 2, 'patch_size_for_spatialtransform': [64, 128, 128]} \n\n assert params.get('mirror') is None, \"old version of params, use new keyword do_mirror\"\n\n tr_transforms = []\n\n\n if params.get(\"selected_data_channels\") is not None:\n tr_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n\n if params.get(\"selected_seg_channels\") is not None:\n tr_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!\n if params.get(\"dummy_2D\") is not None and params.get(\"dummy_2D\"):\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=params.get(\"do_elastic\"), alpha=params.get(\"elastic_deform_alpha\"),\n sigma=params.get(\"elastic_deform_sigma\"),\n do_rotation=params.get(\"do_rotation\"), angle_x=params.get(\"rotation_x\"), angle_y=params.get(\"rotation_y\"),\n angle_z=params.get(\"rotation_z\"), p_rot_per_axis=params.get(\"rotation_p_per_axis\"),\n do_scale=params.get(\"do_scaling\"), scale=params.get(\"scale_range\"),\n border_mode_data=params.get(\"border_mode_data\"), border_cval_data=0, order_data=order_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg,\n order_seg=order_seg, random_crop=params.get(\"random_crop\"), p_el_per_sample=params.get(\"p_eldef\"),\n p_scale_per_sample=params.get(\"p_scale\"), p_rot_per_sample=params.get(\"p_rot\"),\n independent_scale_for_each_axis=params.get(\"independent_scale_factor_for_each_axis\")\n ))\n\n if params.get(\"dummy_2D\"):\n tr_transforms.append(Convert2DTo3DTransform())\n\n # we need to put the color augmentations after the dummy 2d part (if applicable). 
Otherwise the overloaded color\n # channel gets in the way\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) # a kind of noise transform\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n\n if params.get(\"do_additive_brightness\"):\n tr_transforms.append(BrightnessTransform(params.get(\"additive_brightness_mu\"),\n params.get(\"additive_brightness_sigma\"),\n True, p_per_sample=params.get(\"additive_brightness_p_per_sample\"),\n p_per_channel=params.get(\"additive_brightness_p_per_channel\")))\n\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), True, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=0.1)) # inverted gamma, a kind of color transform\n\n if params.get(\"do_gamma\"):\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), False, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=params[\"p_gamma\"]))\n if params.get(\"do_mirror\") or params.get(\"mirror\"):\n tr_transforms.append(MirrorTransform(params.get(\"mirror_axes\")))\n\n if params.get(\"mask_was_used_for_normalization\") is not None:\n mask_was_used_for_normalization = params.get(\"mask_was_used_for_normalization\")\n tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))\n # Replaces all pixels in data_dict[input_key] that have value remove_label with replace_with and saves the result to data_dict[output_key]\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"): # only used for cascade\n print(\"only used for cascaded!\")\n raise NotImplementedError\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_train = NonDetMultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"), seeds=seeds_train,\n pin_memory=pin_memory)\n else:\n batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_train, pin_memory=pin_memory)\n # batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms)\n # import IPython;IPython.embed()\n\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n if 
params.get(\"selected_data_channels\") is not None:\n val_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n if params.get(\"selected_seg_channels\") is not None:\n val_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"):\n print(\"only used for cascaded!\")\n raise NotImplementedError\n # val_transforms.append(MoveSegAsOneHotToData(1, params.get(\"all_segmentation_labels\"), 'seg', 'data'))\n\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_val = NonDetMultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n else:\n batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n # batchgenerator_val = SingleThreadedAugmenter(dataloader_val, val_transforms)\n return batchgenerator_train, batchgenerator_val" }, { "identifier": "unpack_dataset", "path": "nn_transunet/data/dataset_loading.py", "snippet": "def unpack_dataset(folder, threads=default_num_threads, key=\"data\"):\n \"\"\"\n unpacks all npz files in a folder to npy (whatever you want to have unpacked must be saved unter key)\n :param folder:\n :param threads:\n :param key:\n :return:\n \"\"\"\n p = Pool(threads)\n npz_files = subfiles(folder, True, None, \".npz\", True)\n p.map(convert_to_npy, zip(npz_files, [key] * len(npz_files)))\n p.close()\n p.join()" }, { "identifier": "default_2D_augmentation_params", "path": "nn_transunet/data/default_data_augmentation.py", "snippet": "def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):\ndef get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1, pin_memory=True,\n seeds_train=None, seeds_val=None, regions=None):" }, { "identifier": "Generic_TransUNet_max_ppbp", "path": "nn_transunet/networks/transunet3d_model.py", "snippet": "class Generic_TransUNet_max_ppbp(SegmentationNetwork):\n DEFAULT_BATCH_SIZE_3D = 2\n DEFAULT_PATCH_SIZE_3D = (64, 192, 160)\n SPACING_FACTOR_BETWEEN_STAGES = 2\n BASE_NUM_FEATURES_3D = 30\n MAX_NUMPOOL_3D = 999\n MAX_NUM_FILTERS_3D = 320\n\n DEFAULT_PATCH_SIZE_2D = (256, 256)\n BASE_NUM_FEATURES_2D = 30\n DEFAULT_BATCH_SIZE_2D = 50\n MAX_NUMPOOL_2D = 999\n MAX_FILTERS_2D = 480\n\n use_this_for_batch_size_computation_2D = 19739648\n use_this_for_batch_size_computation_3D = 520000000 # 505789440\n\n def __init__(self, input_channels, base_num_features, num_classes, num_pool, 
num_conv_per_stage=2,\n feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,\n final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,\n conv_kernel_sizes=None,\n upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False, # TODO default False\n max_num_features=None, basic_block=ConvDropoutNormNonlin,\n seg_output_use_bias=False,\n patch_size=None, is_vit_pretrain=False, \n vit_depth=12, vit_hidden_size=768, vit_mlp_dim=3072, vit_num_heads=12,\n max_msda='', is_max_ms=True, is_max_ms_fpn=False, max_n_fpn=4, max_ms_idxs=[-4,-3,-2], max_ss_idx=0,\n is_max_bottleneck_transformer=False, max_seg_weight=1.0, max_hidden_dim=256, max_dec_layers=10,\n mw = 0.5,\n is_max=True, is_masked_attn=False, is_max_ds=False, is_masking=False, is_masking_argmax=False,\n is_fam=False, fam_k=5, fam_reduct_ratio=8,\n is_max_hungarian=False, num_queries=None, is_max_cls=False,\n point_rend=False, num_point_rend=None, no_object_weight=None, is_mhsa_float32=False, no_max_hw_pe=False,\n max_infer=None, cost_weight=[2.0, 5.0, 5.0], vit_layer_scale=False, decoder_layer_scale=False):\n\n super(Generic_TransUNet_max_ppbp, self).__init__()\n\n # newly added\n self.is_fam = is_fam\n self.is_max, self.max_msda, self.is_max_ms, self.is_max_ms_fpn, self.max_n_fpn, self.max_ss_idx, self.mw = is_max, max_msda, is_max_ms, is_max_ms_fpn, max_n_fpn, max_ss_idx, mw\n self.max_ms_idxs = max_ms_idxs\n\n self.is_max_cls = is_max_cls\n self.is_masked_attn, self.is_max_ds = is_masked_attn, is_max_ds\n self.is_max_bottleneck_transformer = is_max_bottleneck_transformer\n\n self.convolutional_upsampling = convolutional_upsampling\n self.convolutional_pooling = convolutional_pooling\n self.upscale_logits = upscale_logits\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n\n self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin = nonlin\n self.nonlin_kwargs = nonlin_kwargs\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.weightInitializer = weightInitializer\n self.conv_op = conv_op\n self.norm_op = norm_op\n self.dropout_op = dropout_op\n self.num_classes = num_classes\n self.final_nonlin = final_nonlin\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n\n if conv_op == nn.Conv2d:\n upsample_mode = 'bilinear'\n pool_op = nn.MaxPool2d\n transpconv = nn.ConvTranspose2d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3)] * (num_pool + 1)\n elif conv_op == nn.Conv3d:\n upsample_mode = 'trilinear'\n pool_op = nn.MaxPool3d\n transpconv = nn.ConvTranspose3d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)\n else:\n raise ValueError(\"unknown convolution dimensionality, conv op: %s\" % str(conv_op))\n\n self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)\n self.pool_op_kernel_sizes = pool_op_kernel_sizes\n self.conv_kernel_sizes = conv_kernel_sizes\n\n 
self.conv_pad_sizes = []\n for krnl in self.conv_kernel_sizes:\n self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])\n\n if max_num_features is None:\n if self.conv_op == nn.Conv3d:\n self.max_num_features = self.MAX_NUM_FILTERS_3D\n else:\n self.max_num_features = self.MAX_FILTERS_2D\n else:\n self.max_num_features = max_num_features\n\n self.conv_blocks_context = []\n self.conv_blocks_localization = []\n self.td = []\n self.tu = []\n\n\n self.fams = []\n\n output_features = base_num_features\n input_features = input_channels\n\n for d in range(num_pool):\n # determine the first stride\n if d != 0 and self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[d - 1]\n else:\n first_stride = None\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[d]\n # add convolutions\n self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,\n self.conv_op, self.conv_kwargs, self.norm_op,\n self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,\n first_stride, basic_block=basic_block))\n if not self.convolutional_pooling:\n self.td.append(pool_op(pool_op_kernel_sizes[d]))\n input_features = output_features\n output_features = int(np.round(output_features * feat_map_mul_on_downscale))\n\n output_features = min(output_features, self.max_num_features)\n\n # now the bottleneck.\n # determine the first stride\n if self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[-1]\n else:\n first_stride = None\n\n # the output of the last conv must match the number of features from the skip connection if we are not using\n # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be\n # done by the transposed conv\n if self.convolutional_upsampling:\n final_num_features = output_features\n else:\n final_num_features = self.conv_blocks_context[-1].output_channels\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]\n self.conv_blocks_context.append(nn.Sequential(\n StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, first_stride, basic_block=basic_block),\n StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, basic_block=basic_block)))\n\n # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here\n if not dropout_in_localization:\n old_dropout_p = self.dropout_op_kwargs['p']\n self.dropout_op_kwargs['p'] = 0.0\n\n # now lets build the localization pathway\n for u in range(num_pool):\n nfeatures_from_down = final_num_features\n nfeatures_from_skip = self.conv_blocks_context[\n -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2\n n_features_after_tu_and_concat = nfeatures_from_skip * 2\n\n # the first conv reduces the number of features to match those of skip\n # the following convs work on that number of features\n # if not convolutional upsampling then the final conv reduces the num of features again\n if u != num_pool - 1 and not self.convolutional_upsampling:\n final_num_features = self.conv_blocks_context[-(3 + 
u)].output_channels\n else:\n final_num_features = nfeatures_from_skip\n\n if not self.convolutional_upsampling:\n self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))\n else:\n self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],\n pool_op_kernel_sizes[-(u + 1)], bias=False))\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]\n self.conv_blocks_localization.append(nn.Sequential(\n StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,\n self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),\n StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs, basic_block=basic_block)\n ))\n\n\n\n if self.is_fam:\n self.fams = nn.ModuleList(self.fams)\n\n if self.do_ds:\n self.seg_outputs = []\n for ds in range(len(self.conv_blocks_localization)):\n self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,\n 1, 1, 0, 1, 1, seg_output_use_bias))\n self.seg_outputs = nn.ModuleList(self.seg_outputs)\n\n self.upscale_logits_ops = []\n cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]\n for usl in range(num_pool - 1):\n if self.upscale_logits:\n self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),\n mode=upsample_mode))\n else:\n self.upscale_logits_ops.append(lambda x: x)\n\n if not dropout_in_localization:\n self.dropout_op_kwargs['p'] = old_dropout_p\n\n # register all modules properly\n self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)\n self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)\n self.td = nn.ModuleList(self.td)\n self.tu = nn.ModuleList(self.tu)\n\n if self.upscale_logits:\n self.upscale_logits_ops = nn.ModuleList(\n self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here\n\n if self.weightInitializer is not None:\n self.apply(self.weightInitializer)\n # self.apply(print_module_training_status)\n\n # Transformer configuration\n if self.is_max_bottleneck_transformer:\n self.patch_size = patch_size # e.g. [48, 192, 192]\n config_vit = CONFIGS_ViT['R50-ViT-B_16']\n config_vit.transformer.num_layers = vit_depth\n config_vit.hidden_size = vit_hidden_size # 768\n config_vit.transformer.mlp_dim = vit_mlp_dim # 3072\n config_vit.transformer.num_heads = vit_num_heads # 12\n self.conv_more = nn.Conv3d(config_vit.hidden_size, output_features, 1)\n num_pool_per_axis = np.prod(np.array(pool_op_kernel_sizes), axis=0)\n num_pool_per_axis = np.log2(num_pool_per_axis).astype(np.uint8)\n feat_size = [int(self.patch_size[0]/2**num_pool_per_axis[0]), int(self.patch_size[1]/2**num_pool_per_axis[1]), int(self.patch_size[2]/2**num_pool_per_axis[2])]\n self.transformer = Transformer(config_vit, feat_size=feat_size, vis=False, feat_channels=output_features, use_layer_scale=vit_layer_scale)\n if is_vit_pretrain:\n self.transformer.load_from(weights=np.load(config_vit.pretrained_path))\n\n\n if self.is_max:\n # Max PPB+ configuration (i.e. 
MultiScaleStandardTransformerDecoder)\n cfg = {\n \"num_classes\": num_classes,\n \"hidden_dim\": max_hidden_dim,\n \"num_queries\": num_classes if num_queries is None else num_queries, # N=K if 'fixed matching', else default=100,\n \"nheads\": 8,\n \"dim_feedforward\": max_hidden_dim * 8, # 2048,\n \"dec_layers\": max_dec_layers, # 9 decoder layers, add one for the loss on learnable query?\n \"pre_norm\": False,\n \"enforce_input_project\": False,\n \"mask_dim\": max_hidden_dim, # input feat of segm head?\n \"non_object\": False,\n \"use_layer_scale\": decoder_layer_scale,\n }\n cfg['non_object'] = is_max_cls\n input_proj_list = [] # from low resolution to high resolution (res4 -> res1), [1, 1024, 14, 14], [1, 512, 28, 28], 1, 256, 56, 56], [1, 64, 112, 112]\n decoder_channels = [320, 320, 256, 128, 64, 32]\n if self.is_max_ms: # use multi-scale feature as Transformer decoder input\n if self.is_max_ms_fpn:\n for idx, in_channels in enumerate(decoder_channels[:max_n_fpn]): # max_n_fpn=4: 1/32, 1/16, 1/8, 1/4\n input_proj_list.append(nn.Sequential(\n nn.Conv3d(in_channels, max_hidden_dim, kernel_size=1),\n nn.GroupNorm(32, max_hidden_dim),\n nn.Upsample(size=(int(patch_size[0]/2), int(patch_size[1]/4), int(patch_size[2]/4)), mode='trilinear')\n )) # proj to scale (1, 1/2, 1/2), TODO: init\n self.input_proj = nn.ModuleList(input_proj_list)\n self.linear_encoder_feature = nn.Conv3d(max_hidden_dim * max_n_fpn, max_hidden_dim, 1, 1) # concat four-level feature\n else:\n for idx, in_channels in enumerate([decoder_channels[i] for i in self.max_ms_idxs]):\n input_proj_list.append(nn.Sequential(\n nn.Conv3d(in_channels, max_hidden_dim, kernel_size=1),\n nn.GroupNorm(32, max_hidden_dim),\n ))\n self.input_proj = nn.ModuleList(input_proj_list)\n\n # self.linear_mask_features =nn.Conv3d(decoder_channels[max_n_fpn-1], cfg[\"mask_dim\"], kernel_size=1, stride=1, padding=0,) # low-level feat, dot product Trans-feat\n self.linear_mask_features =nn.Conv3d(decoder_channels[-1], cfg[\"mask_dim\"], kernel_size=1, stride=1, padding=0,) # following SingleScale, high-level feat, obtain seg_map\n else:\n self.linear_encoder_feature = nn.Conv3d(decoder_channels[max_ss_idx], cfg[\"mask_dim\"], kernel_size=1)\n self.linear_mask_features = nn.Conv3d(decoder_channels[-1], cfg[\"mask_dim\"], kernel_size=1, stride=1, padding=0,) # low-level feat, dot product Trans-feat\n\n if self.is_masked_attn:\n from .mask2former_modeling.transformer_decoder.mask2former_transformer_decoder3d import MultiScaleMaskedTransformerDecoder3d\n cfg['num_feature_levels'] = 1 if not self.is_max_ms or self.is_max_ms_fpn else 3\n cfg[\"is_masking\"] = True if is_masking else False\n cfg[\"is_masking_argmax\"] = True if is_masking_argmax else False\n cfg[\"is_mhsa_float32\"] = True if is_mhsa_float32 else False\n cfg[\"no_max_hw_pe\"] = True if no_max_hw_pe else False\n self.predictor = MultiScaleMaskedTransformerDecoder3d(in_channels=max_hidden_dim, mask_classification=is_max_cls, **cfg)\n else:\n from .mask2former_modeling.transformer_decoder.maskformer_transformer_decoder3d import StandardTransformerDecoder\n cfg[\"dropout\"], cfg[\"enc_layers\"], cfg[\"deep_supervision\"] = 0.1, 0, False\n self.predictor = StandardTransformerDecoder(in_channels=max_hidden_dim, mask_classification=is_max_cls, **cfg)\n\n def forward(self, x):\n skips = []\n seg_outputs = []\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x)\n \n x = 
self.conv_blocks_context[-1](x)\n ######### TransUNet #########\n if self.is_max_bottleneck_transformer:\n x, attn = self.transformer(x) # [b, hidden, d/8, h/16, w/16]\n x = self.conv_more(x)\n #############################\n\n ds_feats = [] # obtain multi-scale feature\n ds_feats.append(x)\n for u in range(len(self.tu)):\n if u<len(self.tu)-1 and isinstance(self.is_fam, str) and self.is_fam.startswith('fam_down'):\n skip_down = nn.Upsample(size=x.shape[2:])(skips[-(u + 1)]) if x.shape[2:]!=skips[-(u + 1)].shape[2:] else skips[-(u + 1)]\n x_align = self.fams[u](x, x_l=skip_down)\n x = x + x_align\n\n x = self.tu[u](x) # merely an upsampling or transposeconv operation\n\n if isinstance(self.is_fam, bool) and self.is_fam:\n x_align = self.fams[u](x, x_l=skips[-(u + 1)])\n x = x + x_align\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n if self.do_ds:\n seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))\n ds_feats.append(x)\n\n ######### Max PPB+ #########\n if self.is_max:\n if self.is_max_ms: # is_max_ms_fpn\n multi_scale_features = []\n ms_pixel_feats = ds_feats[:self.max_n_fpn] if self.is_max_ms_fpn else [ds_feats[i] for i in self.max_ms_idxs]\n \n for idx, f in enumerate(ms_pixel_feats): \n\n f = self.input_proj[idx](f) # proj into same spatial/channel dim , but transformer_decoder also project to same mask_dim \n multi_scale_features.append(f)\n transformer_decoder_in_feature = self.linear_encoder_feature(torch.cat(multi_scale_features, dim=1)) if self.is_max_ms_fpn else multi_scale_features # feature pyramid\n mask_features = self.linear_mask_features(ds_feats[-1]) # following SingleScale\n else:\n transformer_decoder_in_feature = self.linear_encoder_feature(ds_feats[self.max_ss_idx])\n mask_features = self.linear_mask_features(ds_feats[-1])\n \n predictions = self.predictor(transformer_decoder_in_feature, mask_features, mask=None)\n\n if self.is_max_cls and self.is_max_ds:\n if self._deep_supervision and self.do_ds:\n return [predictions] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])]\n return predictions\n\n elif self.is_max_ds and not self.is_max_ms and self.mw==1.0: # aux output of max decoder\n aux_out = [p['pred_masks'] for p in predictions['aux_outputs']] # ascending order\n all_out = [predictions[\"pred_masks\"]] + aux_out[::-1] # reverse order, w/o sigmoid activation\n return tuple(all_out)\n elif not self.is_max_ds and self.mw==1.0:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n #############################\n\n if self._deep_supervision and self.do_ds: # assuming turn off ds\n return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])\n else:\n return seg_outputs[-1]\n\n @staticmethod\n def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n \"\"\"\n This only applies for num_conv_per_stage and convolutional_upsampling=True\n not real vram consumption. 
just a constant term to which the vram consumption will be approx proportional\n (+ offset for parameter storage)\n :param deep_supervision:\n :param patch_size:\n :param num_pool_per_axis:\n :param base_num_features:\n :param max_num_features:\n :param num_modalities:\n :param num_classes:\n :param pool_op_kernel_sizes:\n :return:\n \"\"\"\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # print(p, map_size, num_feat, tmp)\n return tmp" } ]
from genericpath import exists from _warnings import warn from collections import OrderedDict from multiprocessing import Pool from time import sleep, time from typing import Tuple from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.utilities.distributed import awesome_allgather_function from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from nnunet.utilities.to_torch import to_cuda, maybe_to_torch from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn from torch import nn, distributed from torch.backends import cudnn from torch.cuda.amp import autocast from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim.lr_scheduler import _LRScheduler from tqdm import trange from ..trainer.nnUNetTrainerV2 import nnUNetTrainerV2, InitWeights_He from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join, subfiles, isfile, load_pickle, \ save_json from ..data.data_augmentation_moreDA import get_moreDA_augmentation from ..data.dataset_loading import unpack_dataset from ..data.default_data_augmentation import default_2D_augmentation_params, get_patch_size, default_3D_augmentation_params from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp from nnunet.training.data_augmentation.data_augmentation_insaneDA2 import get_insaneDA_augmentation2 from ..optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR from torch.optim import lr_scheduler from network_trainer import warmup_poly_lr from network_trainer import poly_lr from ..networks.transunet3d_model import HungarianMatcher3D, compute_loss_hungarian from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs import os import shutil import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F
17,458
self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 # LMH 0.2 -> 0.3 according to paper self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = self.args.world_size# dist.get_world_size() my_rank = self.args.rank # dist.get_rank() # not local_rank if self.args.total_batch_size: # actually it is global_batch_size # reset the batch_size per gpu accordingly self.batch_size = self.args.total_batch_size // world_size # if self.args.local_rank == 0: # print("total_batch_size: %d, updated batch_size per gpu %d, world_size %d" % (self.args.total_batch_size, self.batch_size, world_size)) if self.distribute_batch_size: # set total batch_size to 16 will be fine... self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) # probably 1 for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high = np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) # batch_sizes [self.batch_size]*world_size self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) if (self.patch_size != self.args.crop_size).any(): self.patch_size = self.args.crop_size self.set_batch_size_and_oversample() if self.args.config.find('500Region') != -1: self.num_classes = len(self.regions) # only care about foreground (compatible with sigmoid) def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not 
self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.setup_DA_params_BraTSRegions() if hasattr(self.args, 'deep_supervision_scales') and len(self.args.deep_supervision_scales)>0: self.deep_supervision_scales = self.args.deep_supervision_scales # overwrite setup_DA_params() from nnUNetTrainerV2 self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset")
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #installed package class nnUNetTrainerV2_DDP(nnUNetTrainerV2): def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False, model="Generic_UNet", input_size=(64, 160, 160), args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = ( plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, distribute_batch_size, fp16) assert args is not None self.args = args if self.args.config.find('500Region') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (3,) # correct } if self.args.config.find('500RegionFix') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (2,) # fig 1: the innermost tumor, but this is a bug!! } self.regions_class_order = (1, 2, 3) self.layer_decay = args.layer_decay self.lr_scheduler_name = args.lrschedule # [ TO DO ] self.reclip = args.reclip self.warmup_epochs = args.warmup_epochs self.min_lr = args.min_lr self.is_spatial_aug_only = args.is_spatial_aug_only if "model_params" in args: self.model_params = args.model_params else: self.model_params = {} self.optim_name = args.optim_name self.find_zero_weight_decay = args.find_zero_weight_decay self.model = args.model self.resume = args.resume self.input_size=input_size self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 8 gpu training self.initial_lr = args.initial_lr # 8 * 0.01 self.weight_decay = args.weight_decay # 3e-5 in nnUNetTrainer.py self.save_every = 1 # prev 50 self.distribute_batch_size = distribute_batch_size np.random.seed(local_rank) torch.manual_seed(local_rank) if torch.cuda.is_available(): torch.cuda.manual_seed_all(local_rank) self.local_rank = local_rank if torch.cuda.is_available(): torch.cuda.set_device(local_rank) # dist.init_process_group(backend='nccl', init_method='env://') # init outside self.loss = None self.ce_loss = RobustCrossEntropyLoss() self.global_batch_size = None # we need to know this to properly steer oversample def setup_DA_params_BraTSRegions(self): # nnUNetTrainerV2.setup_DA_params(self) self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. 
* np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size self.data_aug_params["p_rot"] = 0.3 self.data_aug_params["scale_range"] = (0.65, 1.6) self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 # LMH 0.2 -> 0.3 according to paper self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = self.args.world_size# dist.get_world_size() my_rank = self.args.rank # dist.get_rank() # not local_rank if self.args.total_batch_size: # actually it is global_batch_size # reset the batch_size per gpu accordingly self.batch_size = self.args.total_batch_size // world_size # if self.args.local_rank == 0: # print("total_batch_size: %d, updated batch_size per gpu %d, world_size %d" % (self.args.total_batch_size, self.batch_size, world_size)) if self.distribute_batch_size: # set total batch_size to 16 will be fine... 
self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) # probably 1 for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high = np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) # batch_sizes [self.batch_size]*world_size self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) if (self.patch_size != self.args.crop_size).any(): self.patch_size = self.args.crop_size self.set_batch_size_and_oversample() if self.args.config.find('500Region') != -1: self.num_classes = len(self.regions) # only care about foreground (compatible with sigmoid) def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.setup_DA_params_BraTSRegions() if hasattr(self.args, 'deep_supervision_scales') and len(self.args.deep_supervision_scales)>0: self.deep_supervision_scales = self.args.deep_supervision_scales # overwrite setup_DA_params() from nnUNetTrainerV2 self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
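The `set_batch_size_and_oversample` method in the trainer code above splits the global batch across ranks and then gives each rank its own foreground-oversampling fraction, chosen so that the fractions, pooled over all ranks, still average out to `oversample_foreground_percent`. Below is a small standalone sketch of that per-rank computation with hypothetical numbers (4 ranks, 2 samples per rank, 33% oversampling); it mirrors only the non-`distribute_batch_size` branch and is an illustration, not a drop-in replacement for the repository code.

def per_rank_oversample(world_size, batch_size, oversample_foreground_percent):
    """Sketch of the per-rank fractions computed in set_batch_size_and_oversample
    (non-distribute_batch_size branch); hypothetical helper, not part of the repo."""
    global_batch_size = batch_size * world_size
    batch_sizes = [batch_size] * world_size
    fractions = []
    for rank in range(world_size):
        sample_id_low = sum(batch_sizes[:rank])        # first global sample index owned by this rank
        sample_id_high = sum(batch_sizes[:rank + 1])   # one past the last index owned by this rank
        cutoff = 1 - oversample_foreground_percent     # samples past this point get foreground oversampling
        if sample_id_high / global_batch_size < cutoff:
            fractions.append(0.0)                      # rank sits entirely before the cutoff
        elif sample_id_low / global_batch_size > cutoff:
            fractions.append(1.0)                      # rank sits entirely after the cutoff
        else:
            covered = (sample_id_high - sample_id_low) / global_batch_size
            fractions.append(1 - ((cutoff - sample_id_low / global_batch_size) / covered))
    return fractions

# Hypothetical numbers: 4 ranks, 2 samples each, 33% foreground oversampling.
fracs = per_rank_oversample(world_size=4, batch_size=2, oversample_foreground_percent=0.33)
print(fracs)  # approximately [0.0, 0.0, 0.32, 1.0]
pooled = sum(f * 2 for f in fracs) / 8  # weight each rank's fraction by its batch size
assert abs(pooled - 0.33) < 1e-6        # per-rank fractions recover the global 33%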
3
2023-10-11 05:19:25+00:00
24k
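Taken together, the fields of the row that ends here describe one next-line completion example: a source file cut off just before the target line, the target line itself (unpack_dataset(self.folder_with_preprocessed_data)), and the cross-file snippets defining the symbols that line depends on. The sketch below shows one plausible way to assemble such a row into a prompt/target pair; the field names, the dataclass, and the prompt layout are assumptions made for illustration, not an official loader for this dump.

from dataclasses import dataclass
from typing import Dict, List, Tuple

@dataclass
class RepoRow:
    # Assumed field names for illustration only.
    repo_name: str
    file_path: str
    context: List[Dict[str, str]]   # each entry holds an identifier, its path, and a snippet
    cropped_code: str               # file contents truncated right before the target line
    next_line: str                  # the single line to be predicted
    gold_snippet_index: int         # assumed to point at the context snippet relevant to next_line

def build_example(row: RepoRow, max_snippets: int = 2) -> Tuple[str, str]:
    """Prepend a few cross-file snippets as commented context, then the truncated file."""
    header = "\n\n".join(
        f"# {s['path']} :: {s['identifier']}\n{s['snippet']}"
        for s in row.context[:max_snippets]
    )
    prompt = f"{header}\n\n# {row.repo_name}/{row.file_path}\n{row.cropped_code}\n"
    return prompt, row.next_line.strip()

# Made-up miniature row echoing the record above.
row = RepoRow(
    repo_name="example/repo",
    file_path="trainer.py",
    context=[{
        "identifier": "unpack_dataset",
        "path": "data/dataset_loading.py",
        "snippet": "def unpack_dataset(folder):\n    ...",
    }],
    cropped_code="if self.unpack_data:\n    print('unpacking dataset')",
    next_line="unpack_dataset(self.folder_with_preprocessed_data)",
    gold_snippet_index=0,
)
prompt, target = build_example(row)
assert target.startswith("unpack_dataset(")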
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"conv_1x5_5x1\",\n \"conv_3x3\",\n \"sep_conv_3x3\",\n # \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n # \"dil_conv_5x5\",\n # \"none\",\n]\ndef to_dag(C_in, gene, reduction):\ndef from_str(s):\ndef parse(alpha, k, primitives=PRIMITIVES_FEWSHOT):\ndef parse_pairwise(alpha, alpha_pairwise, primitives=PRIMITIVES_FEWSHOT): # deprecated" }, { "identifier": "SearchCNNController", "path": "models/search_cnn.py", "snippet": "class SearchCNNController(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n def __init__(\n self,\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n \n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n \n \n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n \n \n\n \n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n \n\n # setup alphas list\n self._alphas = []\n \n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n \n \n self.net = SearchCNN(\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n \n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary 
tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for 
node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops.DropPath_):\n module.p = p\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n ) = self._get_normalized_alphas()\n\n \n if len(self.device_ids) == 1 :\n output= self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n return output\n\n \n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n \n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n # wnormal_in_copies,\n # wreduce_in_copies,\n # wnormal_pw_copies,\n # wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def 
weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "SearchCNNControllerPC", "path": "models/search_cnn_PC.py", "snippet": "class SearchCNNControllerPC(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n\n def __init__(\n self,\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n use_pc_adaptation=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n self.use_pc_adaptation = use_pc_adaptation\n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n\n self.pc_beta_normal = nn.ParameterList()\n self.pc_beta_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n\n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n self.pc_alpha_normal = None\n self.pc_alpha_reduce = None \n\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n if use_pc_adaptation:\n # initialize pc_beta here\n # beta have to be [[2],[3],[4]]\n self.pc_alpha_normal = nn.ParameterList()\n self.pc_alpha_reduce = nn.ParameterList()\n for i in range(n_nodes):\n num_edges = i + 2\n self.pc_alpha_normal.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n self.pc_alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n\n\n # setup alphas list\n self._alphas = []\n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n self.net = SearchCNNPC(\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n weights_pc_normal = None\n weights_pc_reduce = None\n\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n if self.pc_alpha_normal is not None:\n weights_pc_normal = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_normal\n ]\n weights_pc_reduce = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_reduce\n ]\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n 
weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n 
weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops_7c.DropPath_):\n module.p = p\n\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n ) = self._get_normalized_alphas()\n\n if len(self.device_ids) == 1:\n return self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n\n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n wnormal_in_copies,\n wreduce_in_copies,\n wnormal_pw_copies,\n wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n 
reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "Darts", "path": "task_optimizer/darts.py", "snippet": "class Darts:\n def __init__(self, model, config, do_schedule_lr=False):\n\n self.config = config\n self.config.logger = None\n self.model = model\n self.do_schedule_lr = do_schedule_lr\n self.task_train_steps = config.task_train_steps\n self.test_task_train_steps = config.test_task_train_steps\n self.warm_up_epochs = config.warm_up_epochs\n self.eval_switch = 0\n self.pprevious_grads = 0\n # weights optimizer\n\n self.w_optim = torch.optim.Adam(\n self.model.weights(),\n lr=self.config.w_lr,\n betas=(0.0, 0.999), # config.w_momentum,\n weight_decay=self.config.w_weight_decay,\n ) #\n\n # architecture optimizer\n self.a_optim = torch.optim.Adam(\n model.alphas(),\n self.config.alpha_lr,\n betas=(0.0, 0.999),\n weight_decay=self.config.alpha_weight_decay,\n )\n self.architect = Architect(\n self.model,\n self.config.w_momentum,\n self.config.w_weight_decay,\n self.config.use_first_order_darts,\n )\n def step(\n self,\n task,\n epoch,\n global_progress=\"\",\n test_phase=False,\n alpha_logger=None,\n sparsify_input_alphas=None,\n ):\n \n\n\n log_alphas = False\n\n if test_phase:\n top1_logger = self.config.top1_logger_test\n losses_logger = self.config.losses_logger_test\n train_steps = self.config.test_task_train_steps\n arch_adap_steps = int(train_steps * self.config.test_adapt_steps)\n \n if alpha_logger is not None:\n log_alphas = True\n\n else:\n top1_logger = self.config.top1_logger\n losses_logger = self.config.losses_logger\n train_steps = self.config.task_train_steps\n arch_adap_steps = train_steps\n \n\n \n\n lr = self.config.w_lr\n\n if self.config.w_task_anneal:\n for group in self.w_optim.param_groups:\n group[\"lr\"] = self.config.w_lr\n\n w_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.w_optim, train_steps, eta_min=0.0\n )\n else:\n w_task_lr_scheduler = None\n\n if self.config.a_task_anneal:\n for group in self.a_optim.param_groups:\n group[\"lr\"] = self.config.alpha_lr\n\n a_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.a_optim, arch_adap_steps, eta_min=0.0\n )\n\n else:\n a_task_lr_scheduler = None\n\n model_has_normalizer = hasattr(self.model, \"normalizer\")\n if model_has_normalizer:\n self.model.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.model.normalizer[\"params\"][\"max_steps\"] = float(arch_adap_steps)\n self.architect.v_net.normalizer[\"params\"][\"max_steps\"] = float(\n arch_adap_steps\n )\n from tqdm import tqdm\n if self.config.drop_path_prob > 0.0:\n if not test_phase or self.config.use_drop_path_in_meta_testing:\n self.model.drop_path_prob(self.config.drop_path_prob)\n\n p_bar = tqdm(range(train_steps))\n self.config.total_steps = train_steps * len(task.train_loader)\n \n\n\n for train_step in p_bar: # task train_steps = epochs per task\n warm_up = (\n epoch < self.warm_up_epochs\n ) # if epoch < warm_up_epochs, do warm up\n if (\n train_step >= arch_adap_steps\n ): # no architecture adap after arch_adap_steps steps\n warm_up = 1\n\n if w_task_lr_scheduler is not None:\n w_task_lr_scheduler.step()\n\n 
if a_task_lr_scheduler is not None:\n a_task_lr_scheduler.step()\n torch.cuda.reset_peak_memory_stats(device=0)\n \n task_specific_model = train( \n task,\n self.model,\n self.architect,\n self.w_optim,\n self.a_optim,\n lr,\n global_progress,\n self.config,\n warm_up,\n test_phase\n )\n mem = torch.cuda.memory_stats(0)['allocated_bytes.all.peak']/(1024**2)\n p_bar.set_postfix({\"Memory\" : f\"{mem : .2f}\",\"Task average\":f\"{self.config.top1_logger_test.avg:.1%}\"})\n if train_step == 9:\n self.config.memory_snap = mem\n if (\n model_has_normalizer\n and train_step < (arch_adap_steps - 1)\n and not warm_up\n ): \n self.model.normalizer[\"params\"][\"curr_step\"] += 1\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] += 1\n\n w_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in self.model.named_weights()\n # if layer_weight.grad is not None\n }\n )\n a_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in self.model.named_alphas()\n # if layer_alpha.grad is not None\n }\n )\n\n \n w_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in task_specific_model.named_weights()\n \n }\n )\n a_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in task_specific_model.named_alphas()\n \n }\n )\n # Log genotype\n genotype = self.model.genotype()\n\n if log_alphas:\n alpha_logger[\"normal_relaxed\"].append(\n copy.deepcopy(self.model.alpha_normal)\n )\n alpha_logger[\"reduced_relaxed\"].append(\n copy.deepcopy(self.model.alpha_reduce)\n )\n alpha_logger[\"all_alphas\"].append(a_task)\n alpha_logger[\"normal_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_normal)\n )\n alpha_logger[\"reduced_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_reduce)\n )\n alpha_logger[\"normal_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_normal)\n )\n alpha_logger[\"reduced_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_reduce)\n )\n\n # for test data evaluation, turn off drop path\n if self.config.drop_path_prob > 0.0:\n self.model.drop_path_prob(0.0)\n little_switch = 0\n\n if self.config.naivenaive:\n little_switch = 1\n with torch.no_grad():\n self.config.naivenaive = 1\n self.config.eval_switch = 1\n self.config.cell_phase = 3\n\n for batch_idx, batch in enumerate(task.test_loader):\n \n x_test, y_test = batch\n x_test = x_test.to(self.config.device, non_blocking=True)\n y_test = y_test.to(self.config.device, non_blocking=True)\n if isinstance(self.model, SearchCNNController):\n logits = self.model(\n x_test, sparsify_input_alphas=sparsify_input_alphas\n )\n else:\n logits = self.model(x_test)\n loss = self.model.criterion(logits, y_test)\n\n y_test_pred = logits.softmax(dim=1)\n now = time.strftime('%c', time.localtime(time.time()))\n prec1, prec5 = utils.accuracy(logits, y_test, self.config, topk=(1, 5))\n losses_logger.update(loss.item(), 1)\n top1_logger.update(prec1.item(), 1)\n \n self.config.naivenaive = 0 \n self.config.eval_switch = 0\n self.config.cell_phase = 3 \n\n if little_switch == 1:\n self.config.naivenaive = 1\n \n task_info = namedtuple(\n \"task_info\",\n [\n \"genotype\",\n \"top1\",\n \"w_task\",\n \"a_task\",\n \"loss\",\n \"y_test_pred\",\n \"sparse_num_params\",\n \"w_task_bot\",\n \"a_task_bot\"\n ],\n )\n task_info.w_task = w_task\n task_info.a_task = a_task\n task_info.loss = loss\n y_test_pred = y_test_pred\n task_info.y_test_pred = 
y_test_pred\n task_info.genotype = genotype\n # task_info.top1 = top1\n\n # task_info.sparse_num_params = self.model.get_sparse_num_params(\n # self.model.alpha_prune_threshold\n # )\n task_info.w_task_bot = w_task_bot\n task_info.a_task_bot = a_task_bot\n\n return task_info" }, { "identifier": "Architect", "path": "task_optimizer/darts.py", "snippet": "class Architect:\n \"\"\" Compute gradients of alphas \"\"\"\n\n def __init__(self, net, w_momentum, w_weight_decay, use_first_order_darts):\n \"\"\"\n Args:\n net\n w_momentum: weights momentum\n \"\"\"\n self.net = net\n self.v_net = copy.deepcopy(net)\n self.w_momentum = w_momentum\n self.w_weight_decay = w_weight_decay\n self.use_first_order_darts = use_first_order_darts\n self.pprevious_grads = list()\n \n\n def virtual_step(self, train_X, train_y, xi, w_optim):\n \"\"\"\n Compute unrolled weight w' (virtual step)\n\n Step process:\n 1) forward\n 2) calc loss\n 3) compute gradient (by backprop)\n 4) update gradient\n\n Args:\n xi: learning rate for virtual gradient step (same as weights lr)\n w_optim: weights optimizer\n \"\"\"\n # forward & calc loss\n loss = self.net.loss(train_X, train_y) # L_train(w)\n\n # compute gradient\n gradients = torch.autograd.grad(loss, self.net.weights())\n\n \n \n\n\n\n \n # do virtual step (update gradient)\n # below operations do not need gradient tracking\n with torch.no_grad():\n # dict key is not the value, but the pointer. So original network weight have to\n # be iterated also.\n for w, vw, g in zip(self.net.weights(), self.v_net.weights(), gradients):\n m = w_optim.state[w].get(\"momentum_buffer\", 0.0) * self.w_momentum\n vw.copy_(w - xi * (m + g + self.w_weight_decay * w))\n\n # synchronize alphas\n for a, va in zip(self.net.alphas(), self.v_net.alphas()):\n va.copy_(a)\n\n def backward(self, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # calc unrolled loss\n loss = self.v_net.loss(val_X, val_y) # L_val(w`)\n # compute gradient\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n v_grads = torch.autograd.grad(loss, v_alphas + v_weights, allow_unused=True)\n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n\n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n alpha.grad = da\n \n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n\n\n\n def partial_alpha_backward(self,config, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n \n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # compute gradient\n grad_output_sum = copy.deepcopy(self.v_net.net.config.alpha_previous_grad)\n \n if config.residual_flag == 1:\n pprevious_grad = copy.deepcopy(self.v_net.net.config.alpha_pprevious_grad)\n self.pprevious_grads.append(pprevious_grad) \n \n latent = self.v_net(val_X)\n\n\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n\n if config.residual_flag == 1:\n try:\n if self.v_net.net.config.cell_phase == 1:\n grad_output_sum = 
torch.add(self.pprevious_grads[0],grad_output_sum)\n\n elif self.v_net.net.config.cell_phase == 0:\n grad_output_sum = torch.add(self.pprevious_grads[1],grad_output_sum)\n except:\n print(f\"Shape error,{grad_output_sum.shape} was the desired shape but you got {self.pprevious_grads[0].shape} or {self.pprevious_grads[1].shape}.\")\n print(\"Bypassing residual flag.\")\n\n v_grads = torch.autograd.grad(latent, v_alphas + v_weights, grad_outputs=grad_output_sum, allow_unused=True) \n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n \n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n if alpha.grad is not None and da is not None:\n alpha.grad.data.add_(da)\n else:\n alpha.grad= da\n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n def compute_hessian(self, dw, train_X, train_y):\n \"\"\"\n dw = dw` { L_val(w`, alpha) }\n w+ = w + eps * dw\n w- = w - eps * dw\n hessian = (dalpha { L_train(w+, alpha) } - dalpha { L_train(w-, alpha) }) / (2*eps)\n eps = 0.01 / ||dw||\n \"\"\"\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = 0.01 / norm\n \n # w+ = w + eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n # dalpha { L_train(w+) }\n loss = self.net.loss(train_X, train_y)\n dalpha_pos = torch.autograd.grad(loss, self.net.alphas())\n\n # w- = w - eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p -= 2.0 * eps * d\n\n # dalpha { L_train(w-) }\n loss = self.net.loss(train_X, train_y)\n dalpha_neg = torch.autograd.grad(loss, self.net.alphas())\n\n # recover w\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n hessian = [(p - n) / 2.0 * eps for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian" }, { "identifier": "train", "path": "task_optimizer/darts.py", "snippet": "def train(\n task,\n model,\n architect,\n w_optim,\n alpha_optim,\n lr,\n global_progress,\n config,\n warm_up=False,\n test_phase = False\n):\n model.train()\n pprevious_grads = list()\n initial_model = copy.deepcopy(model)\n \n p_bar_monitor = (enumerate(zip(task.train_loader, task.valid_loader)))#\n for step, ((train_X, train_y), (val_X, val_y)) in p_bar_monitor:\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n \n train_X, train_y = train_X.to(config.device), train_y.to(config.device)\n val_X, val_y = val_X.to(config.device), val_y.to(config.device)\n N = train_X.size(0)\n initial_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n \n if config.light_exp == 1:\n\n if config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset != \"cifar10\" and config.dataset != \"cifar100\":\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n # phase 2. 
architect step (alpha)\n prohibited_list = config.prohibited_list\n if config.naivenaive != 1 and config.eval_switch != 1 and config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset not in prohibited_list:\n\n w_optim.zero_grad()\n alpha_optim.zero_grad()\n train_X, train_y = train_X.chunk(config.split_num), train_y.chunk(config.split_num)\n val_X,val_y = val_X.chunk(config.split_num), val_y.chunk(config.split_num)\n \n for (train_X_chunk, train_y_chunk) ,(val_X_chunk,val_y_chunk) in zip(zip(train_X,train_y),zip(val_X,val_y)):\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n for phase in range(config.layers):\n \n if not warm_up: # only update alphas outside warm up phase\n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X_chunk, train_y_chunk, lr, w_optim) # (calc w`)\n \n if config.cell_phase == config.layers -1:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1 \n architect.backward(train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim)\n \n \n else:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1\n architect.partial_alpha_backward(config, train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim) \n \n \n model.net.alpha_switch = 0\n architect.v_net.net.alpha_switch = 0\n\n # phase 1. child network step (w)\n if config.cell_phase == config.layers -1:\n w_optim.zero_grad()\n logits = model(train_X_chunk)\n loss = model.criterion(logits, train_y_chunk)\n loss_monitor = loss.item()\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip) \n w_optim.step()\n\n\n else:\n w_optim.zero_grad()\n output_grad_sum = copy.deepcopy(config.previous_grad)\n pprevious_grad = copy.deepcopy(config.pprevious_grad)\n pprevious_grads.append(pprevious_grad)\n\n if config.residual_flag == 1:\n if config.cell_phase == 1:\n if pprevious_grads[0].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[0],output_grad_sum)\n elif config.cell_phase == 0:\n if pprevious_grads[1].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[1],output_grad_sum)\n latent = model(train_X_chunk)\n\n\n \n try:\n latent.backward(output_grad_sum)\n \n except:\n if output_grad_sum is not None:\n print(\"batch passed,\",output_grad_sum.shape, \" was the shape of grad saved\")\n print(\"what we had to save was this shape, \", latent.shape )\n print(f\"And this was the phase.{config.cell_phase} what can be the problem here ? \")\n else:\n print(\"output was none. 
Why?\")\n pass\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n \n\n \n config.cell_phase -= 1\n architect.v_net.net.config.cell_phase -= 1\n alpha_optim.step() \n w_optim.step()\n \n\n \n \n \n\n else:\n if not warm_up: # only update alphas outside warm up phase\n alpha_optim.zero_grad()\n \n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X, train_y, lr, w_optim) # (calc w`)\n \n architect.backward(train_X, train_y, val_X, val_y, lr, w_optim)\n alpha_optim.step()\n \n\n \n w_optim.zero_grad()\n \n logits = model(train_X)\n \n loss = model.criterion(logits, train_y)\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n w_optim.step()\n\n \n \n\n\n end.record()\n torch.cuda.synchronize()\n config.computing_time += start.elapsed_time(end)\n \n config.total_steps -= 1\n pprevious_grads = list()\n architect.pprevious_grads = list()\n \n if config.alpha_expect and config.meta_model != 'pc_adaptation':\n if len(config.alpha_grad_footprints) <= 5:\n\n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n config.alpha_grad_footprints.append(alpha_grad) \n\n\n else:\n \n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n \n config.alpha_grad_footprints.pop(0) \n config.alpha_grad_footprints.append(alpha_grad) \n\n config.alpha_sample_metrics = _exp_alpha_metric(initial_alpha,config)\n architect.v_net.net.config.alpha_sample_metrics = config.alpha_sample_metrics\n\n ###################################################################################\n\n\n task_specific_model = copy.deepcopy(model)\n task_specific_model = get_diff_for_const_bottom(initial_model,task_specific_model)\n \n return task_specific_model" } ]
import os import torch import torch.nn as nn import numpy as np import utils.utils as utils import random import time import pandas as pd import copy import argparse from utils import genotypes as gt from models.search_cnn import SearchCNNController from models.search_cnn_PC import SearchCNNControllerPC from task_optimizer.darts import Darts, Architect from task_optimizer.darts import train as d_train from tqdm import tqdm
15,613
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer,
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer,
PRIMITIVES=gt.PRIMITIVES,
0
2023-10-08 02:42:27+00:00
24k
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def move(self, src: str, dst: str):\n raise NotImplementedError\n\n def copy(self, src: str, dst: str):\n raise NotImplementedError\n\n def makedirs(self, path: str, exist_ok=True):\n raise NotImplementedError\n\n def remove(self, path: str):\n raise NotImplementedError\n\n def listdir(self, path: str, recursive=False, full_path=False, contains=None):\n raise NotImplementedError\n\n def isdir(self, path: str) -> bool:\n raise NotImplementedError\n\n def isfile(self, path: str) -> bool:\n raise NotImplementedError\n\n def abspath(self, path: str) -> str:\n raise NotImplementedError\n\n def last_modified(self, path: str) -> datetime:\n raise NotImplementedError\n\n def md5(self, path: str) -> str:\n hash_md5 = hashlib.md5()\n with self.open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n re_remote = re.compile(r'(oss|https?)://')\n\n def islocal(self, path: str) -> bool:\n return not self.re_remote.match(path.lstrip())" }, { "identifier": "DefaultIO", "path": "serve/io_utils.py", "snippet": "class DefaultIO(IO):\n __name__ = 'DefaultIO'\n\n def _check_path(self, path):\n if not self.islocal(path):\n raise RuntimeError(\n 'Credentials must be provided to use oss path. '\n 'Make sure you have created \"user/modules/oss_credentials.py\" according to ReadMe.')\n\n def open(self, path, mode='r'):\n self._check_path(path)\n path = self.abspath(path)\n return open(path, mode=mode)\n\n def exists(self, path):\n self._check_path(path)\n path = self.abspath(path)\n return os.path.exists(path)\n\n def move(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n shutil.move(src, dst)\n\n def copy(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n try:\n shutil.copyfile(src, dst)\n except shutil.SameFileError:\n pass\n\n def makedirs(self, path, exist_ok=True):\n self._check_path(path)\n path = self.abspath(path)\n os.makedirs(path, exist_ok=exist_ok)\n\n def remove(self, path):\n self._check_path(path)\n path = self.abspath(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n self._check_path(path)\n path = self.abspath(path)\n contains = contains or ''\n if recursive:\n files = (os.path.join(dp, f) if full_path else f for dp, dn, fn in os.walk(path) for f in fn)\n files = [file for file in files if contains in file]\n else:\n files = os.listdir(path)\n if full_path:\n files = [os.path.join(path, file) for file in files if contains in file]\n return files\n\n def isdir(self, path):\n return os.path.isdir(path)\n\n def isfile(self, path):\n return os.path.isfile(path)\n\n def abspath(self, path):\n return os.path.abspath(path)\n\n def last_modified(self, path):\n return datetime.fromtimestamp(os.path.getmtime(path))" }, { "identifier": "OSS", "path": "serve/io_utils.py", "snippet": "class OSS(DefaultIO):\n \"Mixed IO module to support both system-level and OSS IO methods\"\n __name__ = 'OSS'\n\n def __init__(self, access_key_id: str, access_key_secret: str, region_bucket: List[List[str]]):\n \"\"\"\n the value of \"region_bucket\" 
should be something like [[\"cn-hangzhou\", \"<yourBucketName>\"], [\"cn-zhangjiakou\", \"<yourBucketName>\"]],\n specifying your buckets and corresponding regions\n \"\"\"\n from oss2 import Auth, Bucket, ObjectIterator\n super().__init__()\n self.ObjectIterator = ObjectIterator\n self.auth = Auth(access_key_id, access_key_secret)\n self.buckets = {\n bucket_name: Bucket(self.auth, f'http://oss-{region}.aliyuncs.com', bucket_name)\n for region, bucket_name in region_bucket\n }\n self.oss_pattern = re.compile(r'oss://([^/]+)/(.+)')\n\n def _split_name(self, path):\n m = self.oss_pattern.match(path)\n if not m:\n raise IOError(f'invalid oss path: \"{path}\", should be \"oss://<bucket_name>/path\"')\n bucket_name, path = m.groups()\n return bucket_name, path\n\n def _split(self, path):\n bucket_name, path = self._split_name(path)\n try:\n bucket = self.buckets[bucket_name]\n except KeyError:\n raise IOError(f'Bucket {bucket_name} not registered in oss_credentials.py')\n return bucket, path\n\n def open(self, full_path, mode='r'):\n if not full_path.startswith('oss://'):\n return super().open(full_path, mode)\n\n bucket, path = self._split(full_path)\n with mute_stderr():\n path_exists = bucket.object_exists(path)\n if 'w' in mode:\n if path_exists:\n bucket.delete_object(path)\n if 'b' in mode:\n return BinaryOSSFile(bucket, path)\n return OSSFile(bucket, path)\n elif mode == 'a':\n position = bucket.head_object(path).content_length if path_exists else 0\n return OSSFile(bucket, path, position=position)\n else:\n if not path_exists:\n raise FileNotFoundError(full_path)\n obj = bucket.get_object(path)\n # auto cache large files to avoid memory issues\n # if obj.content_length > 30 * 1024 ** 2: # 30M\n # from da.utils import cache_file\n # path = cache_file(full_path)\n # return super().open(path, mode)\n if mode == 'rb':\n # TODO for a large file, this will load the whole file into memory\n return NullContextWrapper(BytesIO(obj.read()))\n else:\n assert mode == 'r'\n return NullContextWrapper(StringIO(obj.read().decode()))\n\n def exists(self, path):\n if not path.startswith('oss://'):\n return super().exists(path)\n\n bucket, _path = self._split(path)\n # if file exists\n exists = self._file_exists(bucket, _path)\n # if directory exists\n if not exists:\n try:\n self.listdir(path)\n exists = True\n except FileNotFoundError:\n pass\n return exists\n\n def _file_exists(self, bucket, path):\n with mute_stderr():\n return bucket.object_exists(path)\n\n def move(self, src, dst):\n if not src.startswith('oss://') and not dst.startswith('oss://'):\n return super().move(src, dst)\n self.copy(src, dst)\n self.remove(src)\n\n def copy(self, src, dst):\n cloud_src = src.startswith('oss://')\n cloud_dst = dst.startswith('oss://')\n if not cloud_src and not cloud_dst:\n return super().copy(src, dst)\n\n # download\n if cloud_src and not cloud_dst:\n bucket, src = self._split(src)\n obj = bucket.get_object(src)\n if obj.content_length > 100 * 1024 ** 2: # 100M\n from tqdm import tqdm\n progress = None\n\n def callback(i, n):\n nonlocal progress\n if progress is None:\n progress = tqdm(total=n, unit='B', unit_scale=True, unit_divisor=1024, leave=False,\n desc=f'downloading')\n progress.update(i - progress.n)\n\n bucket.get_object_to_file(src, dst, progress_callback=callback)\n if progress is not None:\n progress.close()\n else:\n bucket.get_object_to_file(src, dst)\n return\n bucket, dst = self._split(dst)\n # upload\n if cloud_dst and not cloud_src:\n bucket.put_object_from_file(dst, src)\n return\n # 
copy between oss paths\n if src != dst:\n src_bucket_name, src = self._split_name(src)\n bucket.copy_object(src_bucket_name, src, dst)\n # TODO: support large file copy\n # https://help.aliyun.com/document_detail/88465.html?spm=a2c4g.11174283.6.882.4d157da2mgp3xc\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n if not path.startswith('oss://'):\n return super().listdir(path, recursive, full_path, contains)\n\n bucket, path = self._split(path)\n path = path.rstrip('/') + '/'\n files = [obj.key for obj in self.ObjectIterator(bucket, prefix=path, delimiter='' if recursive else '/')]\n try:\n files.remove(path)\n except ValueError:\n pass\n if full_path:\n files = [f'oss://{bucket.bucket_name}/{file}' for file in files]\n else:\n files = [file[len(path):] for file in files]\n if not files:\n raise FileNotFoundError(f'No such directory: oss://{bucket.bucket_name}/{path}')\n files = [file for file in files if (contains or '') in file]\n return files\n\n def remove(self, path):\n if not path.startswith('oss://'):\n return super().remove(path)\n\n if self.isfile(path):\n paths = [path]\n else:\n paths = self.listdir(path, recursive=True, full_path=True)\n for path in paths:\n bucket, path = self._split(path)\n bucket.delete_object(path)\n\n def makedirs(self, path, exist_ok=True):\n # there is no need to create directory in oss\n if not path.startswith('oss://'):\n return super().makedirs(path)\n\n def isdir(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path.rstrip('/') + '/')\n\n def isfile(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path) and not self.isdir(path)\n\n def abspath(self, path):\n if not path.startswith('oss://'):\n return super().abspath(path)\n return path\n\n def authorize(self, path):\n if not path.startswith('oss://'):\n raise ValueError('Only oss path can use \"authorize\"')\n import oss2\n bucket, path = self._split(path)\n bucket.put_object_acl(path, oss2.OBJECT_ACL_PUBLIC_READ)\n\n def last_modified(self, path):\n if not path.startswith('oss://'):\n return super().last_modified(path)\n bucket, path = self._split(path)\n return datetime.strptime(\n bucket.get_object_meta(path).headers['Last-Modified'],\n r'%a, %d %b %Y %H:%M:%S %Z'\n ) + timedelta(hours=8)" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # the image has been split into multiple patches (doc scenario by default)\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # for 'pre' processing, v2t finally outputs the tokens of one image\n text += '<image>'\n else:\n # for 'post' processing, v2t finally outputs multiple images\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # if the image was not sliced, stack as usual and create num_image patch positions of (0, 0) to keep the format consistent\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlImageProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlImageProcessor(CLIPImageProcessor):\n pass" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "MplugOwlConfig", "path": "mplug_owl/configuration_mplug_owl.py", "snippet": "class MplugOwlConfig(PretrainedConfig):\n r\"\"\"\n [`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`].\n It is used to instantiate a mPLUG-Owl model according to the specified arguments, defining the vision model,\n Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the mPLUG-Owl [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b)\n architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisionConfig`].\n visual_abstractor_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisualAbstractorConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n num_query_tokens (`int`, *optional*, defaults to 32):\n The number of query tokens passed through the Transformer.\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... MplugOwlVisionConfig,\n ... MplugOwlVisualAbstractorConfig,\n ... OPTConfig,\n ... MplugOwlConfig,\n ... MplugOwlForConditionalGeneration,\n ... )\n\n >>> # Initializing a MplugOwlConfig with x-plug/x_plug-llama-7b style configuration\n >>> configuration = MplugOwlConfig()\n\n >>> # Initializing a MplugOwlForConditionalGeneration (with random weights) from the x-plug/x_plug-llama-7b style configuration\n >>> model = MplugOwlForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a MplugOwlConfig from a MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig and any PretrainedConfig\n\n >>> # Initializing mPLUG-Owl vision, mPLUG-Owl Q-Former and language model configurations\n >>> vision_config = MplugOwlVisionConfig()\n >>> visual_abstractor_config = MplugOwlVisualAbstractorConfig()\n >>> text_config = OPTConfig()\n\n >>> config = MplugOwlConfig.from_text_vision_configs(vision_config, visual_abstractor_config, text_config)\n ```\"\"\"\n model_type = \"mplug-owl\"\n is_composition = True\n\n def __init__(\n self, vision_config=None, visual_abstractor_config=None, text_config=None, num_query_tokens=64, **kwargs\n ):\n super().__init__(**kwargs)\n if vision_config is None:\n vision_config = MplugOwlVisionConfig().to_dict()\n logger.info(\"vision_config is None.\")\n\n if visual_abstractor_config is None:\n visual_abstractor_config = {}\n logger.info(\"abstractor_config is None. 
\")\n\n if text_config is None:\n # we use LLAMA 7b by default\n from transformers.llama.configuration_llama import LlamaConfig\n\n text_config = LlamaConfig(pad_token_id=2).to_dict()\n logger.info(\"text_config is None.\")\n\n self.vision_config = MplugOwlVisionConfig(**vision_config)\n self.visual_abstractor_config = MplugOwlVisualAbstractorConfig(**visual_abstractor_config)\n # self.visual_abstractor_config.layer_norm_eps = 1e-6\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"llama\"\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n self.num_query_tokens = num_query_tokens\n # self.visual_abstractor_config.encoder_hidden_size = self.vision_config.hidden_size\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n self.initializer_factor = 1.0\n self.initializer_range = 0.02\n\n for attr in dir(self.text_config):\n if not hasattr(self, attr):\n setattr(self, attr, getattr(self.text_config, attr))\n\n @classmethod\n def from_vision_visual_abstractor_text_configs(\n cls,\n vision_config: MplugOwlVisionConfig,\n visual_abstractor_config: MplugOwlVisualAbstractorConfig,\n text_config: PretrainedConfig,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a [`MplugOwlConfig`] (or a derived class) from a mPLUG-Owl vision model, Q-Former and language\n model configurations.\n\n Returns:\n [`MplugOwlConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(\n vision_config=vision_config.to_dict(),\n visual_abstractor_config=visual_abstractor_config.to_dict(),\n text_config=text_config.to_dict(),\n **kwargs,\n )\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. 
Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"visual_abstractor_config\"] = self.visual_abstractor_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output" }, { "identifier": "MplugOwlTokenizer", "path": "mplug_owl/tokenization_mplug_owl.py", "snippet": "class MplugOwlTokenizer(LlamaTokenizer):\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=\"<unk>\",\n sp_model_kwargs=None,\n add_bos_token=False,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n unk_token,\n bos_token,\n eos_token,\n pad_token,\n sp_model_kwargs,\n add_bos_token,\n add_eos_token,\n clean_up_tokenization_spaces,\n **kwargs,\n )\n self.eod_id = self.eos_token_id" }, { "identifier": "post_process_output", "path": "serve/model_utils.py", "snippet": "def post_process_output(text):\n text = text.strip()\n pattern = re.compile(\n r\"<unk>|<pad>|<s>|</s>|\\[PAD\\]|<\\|endoftext\\|>|\\[UNK\\]|\\[CLS\\]|\\[MASK\\]|<\\|startofpiece\\|>|<\\|endofpiece\\|>|\\[gMASK\\]|\\[sMASK\\]\"\n )\n text = pattern.sub(\"\", text.strip()).strip()\n return text" }, { "identifier": "Stream", "path": "serve/model_utils.py", "snippet": "class Stream(transformers.StoppingCriteria):\n def __init__(self, callback_func=None):\n self.callback_func = callback_func\n\n def __call__(self, input_ids, scores) -> bool:\n if self.callback_func is not None:\n self.callback_func(input_ids[0])\n return False" }, { "identifier": "Iteratorize", "path": "serve/model_utils.py", "snippet": "class Iteratorize:\n\n \"\"\"\n Transforms a function that takes a callback\n into a lazy iterator (generator).\n \"\"\"\n\n def __init__(self, func, kwargs={}, callback=None):\n self.mfunc = func\n self.c_callback = callback\n self.q = Queue()\n self.sentinel = object()\n self.kwargs = kwargs\n self.stop_now = False\n\n def _callback(val):\n if self.stop_now:\n raise ValueError\n self.q.put(val)\n\n def gentask():\n try:\n ret = self.mfunc(callback=_callback, **self.kwargs)\n except ValueError:\n pass\n except:\n traceback.print_exc()\n pass\n\n self.q.put(self.sentinel)\n if self.c_callback:\n self.c_callback(ret)\n\n self.thread = Thread(target=gentask)\n self.thread.start()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n obj = self.q.get(True, None)\n if obj is self.sentinel:\n raise StopIteration\n else:\n return obj\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop_now = True" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # 图片被切分成了多块 默认是doc场景\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # 对于pre处理 v2t最终输出的是一张图的token\n text += '<image>'\n else:\n # 对于post处理 v2t最终输出的是多图\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # 如果没有切片 则正常stack 并创建patch position = num_image (0,0)的patch id以保持一致\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "build_processors", "path": "pipeline/data_utils/processors/builder.py", "snippet": "def build_processors(processors_cfg):\n processors = dict()\n for task, processor in processors_cfg.items():\n processors[task] = build_from_cfg(processor, PROCESSORS)\n ic(type(processors[task]))\n return processors" } ]
from PIL import Image from io import BytesIO from .io_utils import IO, DefaultIO, OSS from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from mplug_owl.configuration_mplug_owl import MplugOwlConfig from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer from transformers import GenerationConfig from .model_utils import post_process_output, Stream, Iteratorize from pathlib import Path from mplug_owl.processing_mplug_owl import MplugOwlProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from pipeline.data_utils.processors.builder import build_processors from pipeline.data_utils.processors import * from transformers.models.llama.tokenization_llama import LlamaTokenizer from icecream import ic import torch import gradio as gr import logging import sys import os import json import requests import datetime import uuid import base64 import time import sys import transformers
14,627
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config
self.image_processor = build_processors(config['valid_processors'])['sft']
13
2023-10-08 06:29:02+00:00
24k
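The record that ends with the `level` value above illustrates the completion task these rows encode: `cropped_code` stops at `self.config = config`, `next_line` holds the ground-truth continuation (`self.image_processor = build_processors(...)`), and `gold_snippet_index` (13 here) appears to point at the `context` entry whose snippet defines the needed symbol (`build_processors`). Below is a minimal sketch of turning one such row into a prompt/target pair; the field semantics (in particular reading `gold_snippet_index` as an index into `context`) and the `records.jsonl` filename are assumptions inferred from this dump, not documented behavior.

```python
import json
from typing import Any, Dict, Tuple


def build_prompt(row: Dict[str, Any], max_context_snippets: int = 5) -> Tuple[str, str]:
    """Assemble a (prompt, target) pair from one row of this dump.

    Assumes the schema shown here: `context` is a list of dicts with
    "identifier", "path" and "snippet" keys, `cropped_code` is the truncated
    file body, and `next_line` is the line to be predicted.
    """
    # Cross-file context: move the gold snippet to the front if the index is valid
    # (treating gold_snippet_index as an index into `context` is an assumption).
    snippets = list(row.get("context", []))
    gold = row.get("gold_snippet_index")
    if isinstance(gold, int) and 0 <= gold < len(snippets):
        snippets.insert(0, snippets.pop(gold))

    context_block = "\n\n".join(
        f"# {s['path']} :: {s['identifier']}\n{s['snippet']}"
        for s in snippets[:max_context_snippets]
    )

    # In-file prefix: the record's imports followed by the cropped file body.
    prompt = f"{context_block}\n\n{row['import_statement']}\n{row['cropped_code']}\n"
    target = row["next_line"]
    return prompt, target


if __name__ == "__main__":
    # "records.jsonl" is a hypothetical local export of this dataset, one JSON object per line.
    with open("records.jsonl") as f:
        row = json.loads(f.readline())
    prompt, target = build_prompt(row)
    print(prompt[-200:])
    print("--- expected next line ---")
    print(target)
```

A consumer would then score a model by whether its first generated line matches `next_line`; the `token_num` and `level` columns (14,627 and 24k for this record) give the approximate prompt length bucket.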
LeapLabTHU/Rank-DETR
projects/rank_detr/configs/models/rank_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "ChannelMapper", "path": "detrex/modeling/neck/channel_mapper.py", "snippet": "class ChannelMapper(nn.Module):\n \"\"\"Channel Mapper for reduce/increase channels of backbone features. Modified\n from `mmdet <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/channel_mapper.py>`_.\n\n This is used to reduce/increase the channels of backbone features.\n\n Args:\n input_shape (Dict[str, ShapeSpec]): A dict which contains the backbone features meta infomation,\n e.g. 
``input_shape = {\"res5\": ShapeSpec(channels=2048)}``.\n in_features (List[str]): A list contains the keys which maps the features output from the backbone,\n e.g. ``in_features = [\"res\"]``.\n out_channels (int): Number of output channels for each scale.\n kernel_size (int, optional): Size of the convolving kernel for each scale.\n Default: 3.\n stride (int, optional): Stride of convolution for each scale. Default: 1.\n bias (bool, optional): If True, adds a learnable bias to the output of each scale.\n Default: True.\n groups (int, optional): Number of blocked connections from input channels to\n output channels for each scale. Default: 1.\n dilation (int, optional): Spacing between kernel elements for each scale.\n Default: 1.\n norm_layer (nn.Module, optional): The norm layer used for each scale. Default: None.\n activation (nn.Module, optional): The activation layer used for each scale. Default: None.\n num_outs (int, optional): Number of output feature maps. There will be ``extra_convs`` when\n ``num_outs`` is larger than the length of ``in_features``. Default: None.\n\n Examples:\n >>> import torch\n >>> import torch.nn as nn\n >>> from detrex.modeling import ChannelMapper\n >>> from detectron2.modeling import ShapeSpec\n >>> input_features = {\n ... \"p0\": torch.randn(1, 128, 128, 128),\n ... \"p1\": torch.randn(1, 256, 64, 64),\n ... \"p2\": torch.randn(1, 512, 32, 32),\n ... \"p3\": torch.randn(1, 1024, 16, 16),\n ... }\n >>> input_shapes = {\n ... \"p0\": ShapeSpec(channels=128),\n ... \"p1\": ShapeSpec(channels=256),\n ... \"p2\": ShapeSpec(channels=512),\n ... \"p3\": ShapeSpec(channels=1024),\n ... }\n >>> in_features = [\"p0\", \"p1\", \"p2\", \"p3\"]\n >>> neck = ChannelMapper(\n ... input_shapes=input_shapes,\n ... in_features=in_features,\n ... out_channels=256,\n ... norm_layer=nn.GroupNorm(num_groups=32, num_channels=256)\n >>> outputs = neck(input_features)\n >>> for i in range(len(outputs)):\n ... 
print(f\"output[{i}].shape = {outputs[i].shape}\")\n output[0].shape = torch.Size([1, 256, 128, 128])\n output[1].shape = torch.Size([1, 256, 64, 64])\n output[2].shape = torch.Size([1, 256, 32, 32])\n output[3].shape = torch.Size([1, 256, 16, 16])\n \"\"\"\n\n def __init__(\n self,\n input_shapes: Dict[str, ShapeSpec],\n in_features: List[str],\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n bias: bool = True,\n groups: int = 1,\n dilation: int = 1,\n norm_layer: nn.Module = None,\n activation: nn.Module = None,\n num_outs: int = None,\n **kwargs,\n ):\n super(ChannelMapper, self).__init__()\n self.extra_convs = None\n\n in_channels_per_feature = [input_shapes[f].channels for f in in_features]\n\n if num_outs is None:\n num_outs = len(input_shapes)\n\n self.convs = nn.ModuleList()\n for in_channel in in_channels_per_feature:\n self.convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n if num_outs > len(in_channels_per_feature):\n self.extra_convs = nn.ModuleList()\n for i in range(len(in_channels_per_feature), num_outs):\n if i == len(in_channels_per_feature):\n in_channel = in_channels_per_feature[-1]\n else:\n in_channel = out_channels\n self.extra_convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n self.input_shapes = input_shapes\n self.in_features = in_features\n self.out_channels = out_channels\n\n def forward(self, inputs):\n \"\"\"Forward function for ChannelMapper\n\n Args:\n inputs (Dict[str, torch.Tensor]): The backbone feature maps.\n\n Return:\n tuple(torch.Tensor): A tuple of the processed features.\n \"\"\"\n assert len(inputs) == len(self.convs)\n outs = [self.convs[i](inputs[self.in_features[i]]) for i in range(len(inputs))]\n if self.extra_convs:\n for i in range(len(self.extra_convs)):\n if i == 0:\n outs.append(self.extra_convs[0](inputs[self.in_features[-1]]))\n else:\n outs.append(self.extra_convs[i](outs[-1]))\n return tuple(outs)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats (int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "RankDetrTransformerEncoder", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = False,\n num_feature_levels: int = 4,\n use_checkpoint: bool = True,\n ):\n super(RankDetrTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n num_fcs=2,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n # use encoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n 
attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "RankDetrTransformerDecoder", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n return_intermediate: bool = True,\n num_feature_levels: int = 4,\n use_checkpoint: bool = True,\n look_forward_twice=True,\n num_queries_one2one=300,\n num_queries_one2many=1500,\n two_stage_num_proposals=300,\n rank_adaptive_classhead=True,\n query_rank_layer=True,\n ):\n super(RankDetrTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=[\n MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=True,\n ),\n MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ],\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\n \"self_attn\",\n \"norm\",\n \"cross_attn\",\n \"norm\",\n \"ffn\",\n \"norm\",\n ),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n\n self.bbox_embed = None\n self.class_embed = None\n self.look_forward_twice = look_forward_twice\n\n # Rank-adaptive Classification Head\n self.rank_adaptive_classhead = rank_adaptive_classhead\n\n # query rank layer\n self.query_rank_layer = query_rank_layer\n self.num_queries_one2one = num_queries_one2one\n self.num_queries_one2many = num_queries_one2many\n if self.query_rank_layer:\n self.rank_aware_content_query = nn.ModuleList([\n copy.deepcopy(nn.Embedding(two_stage_num_proposals, embed_dim))\n for _ in range(num_layers - 1)\n ])\n for m in self.rank_aware_content_query.parameters():\n nn.init.zeros_(m)\n\n self.pre_racq_trans = nn.ModuleList([\n copy.deepcopy(nn.Linear(embed_dim, embed_dim))\n for _ in range(num_layers - 1)\n ])\n self.post_racq_trans = nn.ModuleList([\n copy.deepcopy(nn.Linear(embed_dim * 2, embed_dim))\n for _ in range(num_layers - 1)\n ])\n\n # decoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n reference_points=None,\n valid_ratios=None,\n **kwargs,\n ):\n output = query\n\n intermediate = []\n intermediate_reference_points = []\n for layer_idx, layer in enumerate(self.layers):\n\n # query rank layer\n if layer_idx >= 1:\n if self.query_rank_layer:\n output = torch.gather(\n output, 1, rank_indices.unsqueeze(-1).repeat(1, 1, output.shape[-1])\n )\n concat_term = self.pre_racq_trans[layer_idx - 1](\n self.rank_aware_content_query[layer_idx - 1].weight[:output.shape[1]].unsqueeze(0).expand(output.shape[0], -1, -1)\n )\n output = torch.cat((output, concat_term), dim=2)\n output = self.post_racq_trans[layer_idx - 
1](output)\n query_pos = torch.gather(\n query_pos, 1, rank_indices.unsqueeze(-1).repeat(1, 1, query_pos.shape[-1])\n )\n if (not self.query_rank_layer) and (self.rank_adaptive_classhead):\n output = torch.gather(\n output, 1, rank_indices.unsqueeze(-1).repeat(1, 1, output.shape[-1])\n )\n query_pos = torch.gather(\n query_pos, 1, rank_indices.unsqueeze(-1).repeat(1, 1, query_pos.shape[-1])\n )\n\n if reference_points.shape[-1] == 4:\n reference_points_input = (\n reference_points[:, :, None]\n * torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n )\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]\n\n output = layer(\n output,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n reference_points=reference_points_input,\n **kwargs,\n )\n\n if self.bbox_embed is not None:\n tmp = self.bbox_embed[layer_idx](output)\n if reference_points.shape[-1] == 4:\n new_reference_points = tmp + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n else:\n assert reference_points.shape[-1] == 2\n new_reference_points = tmp\n new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n reference_points = new_reference_points.detach()\n\n if self.return_intermediate:\n\n if (layer_idx >= 0) and (self.query_rank_layer or self.rank_adaptive_classhead):\n # generate rank indices\n outputs_class_tmp = self.class_embed[layer_idx](output) # [bs, num_queries, embed_dim] -> [bs, num_queries, num_classes]\n rank_basis = outputs_class_tmp.sigmoid().max(dim=2, keepdim=False)[0] # tensor shape: [bs, num_queries]\n if self.training:\n rank_indices_one2one = torch.argsort(rank_basis[:, : self.num_queries_one2one], dim=1, descending=True) # tensor shape: [bs, num_queries_one2one]\n rank_indices_one2many = torch.argsort(rank_basis[:, self.num_queries_one2one :], dim=1, descending=True) # tensor shape: [bs, num_queries_one2many]\n rank_indices = torch.cat(\n (\n rank_indices_one2one,\n rank_indices_one2many + torch.ones_like(rank_indices_one2many) * self.num_queries_one2one\n ),\n dim=1,\n ) # tensor shape: [bs, num_queries_one2one+num_queries_one2many]\n else:\n rank_indices = torch.argsort(rank_basis[:, : self.num_queries_one2one], dim=1, descending=True)\n rank_indices = rank_indices.detach()\n # rank the reference points\n reference_points = torch.gather(\n reference_points, 1, rank_indices.unsqueeze(-1).repeat(1, 1, reference_points.shape[-1]))\n new_reference_points = torch.gather(\n new_reference_points, 1, rank_indices.unsqueeze(-1).repeat(1, 1, new_reference_points.shape[-1]))\n\n intermediate.append(output)\n intermediate_reference_points.append(\n new_reference_points if self.look_forward_twice else reference_points\n )\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points" }, { "identifier": "RankDetrTransformer", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformer(nn.Module):\n \"\"\"Transformer module for Deformable DETR\n\n Args:\n encoder (nn.Module): encoder module.\n decoder (nn.Module): decoder module.\n as_two_stage (bool): whether to use two-stage transformer. Default False.\n num_feature_levels (int): number of feature levels. 
Default 4.\n two_stage_num_proposals (int): number of proposals in two-stage transformer. Default 300.\n Only used when as_two_stage is True.\n \"\"\"\n\n def __init__(\n self,\n encoder=None,\n decoder=None,\n num_feature_levels=4,\n as_two_stage=False,\n num_queries_one2one=300,\n num_queries_one2many=1500,\n two_stage_num_proposals=300,\n mixed_selection=True,\n rank_adaptive_classhead=True,\n ):\n super(RankDetrTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.num_feature_levels = num_feature_levels\n self.as_two_stage = as_two_stage\n self.two_stage_num_proposals = two_stage_num_proposals\n\n self.embed_dim = self.encoder.embed_dim\n\n self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dim))\n\n if self.as_two_stage:\n self.enc_output = nn.Linear(self.embed_dim, self.embed_dim)\n self.enc_output_norm = nn.LayerNorm(self.embed_dim)\n self.pos_trans = nn.Linear(self.embed_dim * 2, self.embed_dim * 2)\n self.pos_trans_norm = nn.LayerNorm(self.embed_dim * 2)\n else:\n self.reference_points = nn.Linear(self.embed_dim, 2)\n\n self.mixed_selection = mixed_selection\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MultiScaleDeformableAttention):\n m.init_weights()\n if not self.as_two_stage:\n nn.init.xavier_normal_(self.reference_points.weight.data, gain=1.0)\n nn.init.constant_(self.reference_points.bias.data, 0.0)\n nn.init.normal_(self.level_embeds)\n\n def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n N, S, C = memory.shape\n proposals = []\n _cur = 0\n for lvl, (H, W) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H * W)].view(N, H, W, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(0, H - 1, H, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W - 1, W, dtype=torch.float32, device=memory.device),\n )\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n proposals.append(proposal)\n _cur += H * W\n\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(\n -1, keepdim=True\n )\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(\n memory_padding_mask.unsqueeze(-1), float(\"inf\")\n )\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float(\"inf\"))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n output_memory = self.enc_output_norm(self.enc_output(output_memory))\n return output_memory, output_proposals\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n \"\"\"Get the reference points used in decoder.\n\n Args:\n spatial_shapes (Tensor): The shape of all\n feature maps, has shape (num_level, 2).\n valid_ratios (Tensor): The radios of valid\n points on the feature map, has 
shape\n (bs, num_levels, 2)\n device (obj:`device`): The device where\n reference_points should be.\n\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n reference_points_list = []\n for lvl, (H, W) in enumerate(spatial_shapes):\n # TODO check this 0.5\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),\n torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device),\n )\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def get_valid_ratio(self, mask):\n \"\"\"Get the valid radios of feature maps of all level.\"\"\"\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def get_proposal_pos_embed(self, proposals, num_pos_feats=128, temperature=10000):\n \"\"\"Get the position embedding of proposal.\"\"\"\n scale = 2 * math.pi\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)\n dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / num_pos_feats)\n # N, L, 4\n proposals = proposals.sigmoid() * scale\n # N, L, 4, 128\n pos = proposals[:, :, :, None] / dim_t\n # N, L, 4, 64, 2\n pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)\n return pos\n\n def forward(\n self,\n multi_level_feats,\n multi_level_masks,\n multi_level_pos_embeds,\n query_embed,\n self_attn_mask,\n **kwargs,\n ):\n assert self.as_two_stage or query_embed is not None\n\n feat_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (feat, mask, pos_embed) in enumerate(\n zip(multi_level_feats, multi_level_masks, multi_level_pos_embeds)\n ):\n bs, c, h, w = feat.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n\n feat = feat.flatten(2).transpose(1, 2) # bs, hw, c\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c\n lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n feat_flatten.append(feat)\n mask_flatten.append(mask)\n feat_flatten = torch.cat(feat_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_flatten.device\n )\n level_start_index = torch.cat(\n (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n )\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in multi_level_masks], 1)\n\n reference_points = self.get_reference_points(\n spatial_shapes, valid_ratios, device=feat.device\n )\n\n memory = self.encoder(\n query=feat_flatten,\n key=None,\n value=None,\n query_pos=lvl_pos_embed_flatten,\n query_key_padding_mask=mask_flatten,\n spatial_shapes=spatial_shapes,\n reference_points=reference_points,\n level_start_index=level_start_index,\n valid_ratios=valid_ratios,\n **kwargs,\n )\n\n bs, _, c = memory.shape\n if self.as_two_stage:\n output_memory, 
output_proposals = self.gen_encoder_output_proposals(\n memory, mask_flatten, spatial_shapes\n )\n\n enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n enc_outputs_coord_unact = (\n self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n )\n\n topk = self.two_stage_num_proposals\n topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]\n topk_coords_unact = torch.gather(\n enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n )\n topk_coords_unact = topk_coords_unact.detach()\n reference_points = topk_coords_unact.sigmoid()\n init_reference_out = reference_points\n pos_trans_out = self.pos_trans_norm(\n self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))\n )\n if not self.mixed_selection:\n query_pos, query = torch.split(pos_trans_out, c, dim=2)\n else:\n # query_pos here is the content embed for deformable DETR\n query = query_embed.unsqueeze(0).expand(bs, -1, -1)\n query_pos, _ = torch.split(pos_trans_out, c, dim=2)\n else:\n query_pos, query = torch.split(query_embed, c, dim=1)\n query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)\n query = query.unsqueeze(0).expand(bs, -1, -1)\n reference_points = self.reference_points(query_pos).sigmoid()\n init_reference_out = reference_points\n\n # decoder\n inter_states, inter_references = self.decoder(\n query=query, # bs, num_queries, embed_dims\n key=None, # bs, num_tokens, embed_dims\n value=memory, # bs, num_tokens, embed_dims\n query_pos=query_pos,\n key_padding_mask=mask_flatten, # bs, num_tokens\n reference_points=reference_points, # num_queries, 4\n spatial_shapes=spatial_shapes, # nlvl, 2\n level_start_index=level_start_index, # nlvl\n valid_ratios=valid_ratios, # bs, nlvl, 2\n attn_masks=[self_attn_mask, None],\n **kwargs,\n )\n\n inter_references_out = inter_references\n if self.as_two_stage:\n return (\n inter_states,\n init_reference_out,\n inter_references_out,\n enc_outputs_class,\n enc_outputs_coord_unact,\n )\n return inter_states, init_reference_out, inter_references_out, None, None" }, { "identifier": "RankDETR", "path": "projects/rank_detr/modeling/rank_detr.py", "snippet": "class RankDETR(nn.Module):\n \"\"\"Implements the Deformable DETR model.\n\n Code is modified from the `official github repo\n <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n More details can be found in the `paper\n <https://arxiv.org/abs/2010.04159>`_ .\n\n Args:\n backbone (nn.Module): the backbone module.\n position_embedding (nn.Module): the position embedding module.\n neck (nn.Module): the neck module.\n transformer (nn.Module): the transformer module.\n embed_dim (int): the dimension of the embedding.\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total losses.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n aux_loss (bool): whether to use auxiliary loss. Default: True.\n with_box_refine (bool): whether to use box refinement. Default: False.\n as_two_stage (bool): whether to use two-stage. Default: False.\n select_box_nums_for_evaluation (int): the number of topk candidates\n slected at postprocess for evaluation. 
Default: 100.\n\n \"\"\"\n\n def __init__(\n self,\n backbone,\n position_embedding,\n neck,\n transformer,\n embed_dim,\n num_classes,\n num_queries_one2one,\n num_queries_one2many,\n criterion,\n pixel_mean,\n pixel_std,\n aux_loss=True,\n with_box_refine=False,\n as_two_stage=False,\n select_box_nums_for_evaluation=100,\n device=\"cuda\",\n mixed_selection=True,\n k_one2many=6,\n lambda_one2many=1.0,\n rank_adaptive_classhead=True,\n ):\n super().__init__()\n num_queries = num_queries_one2one + num_queries_one2many\n # define backbone and position embedding module\n self.backbone = backbone\n self.position_embedding = position_embedding\n\n # define neck module\n self.neck = neck\n\n # define learnable query embedding\n self.num_queries = num_queries\n if not as_two_stage:\n self.query_embedding = nn.Embedding(num_queries, embed_dim * 2)\n elif mixed_selection:\n self.query_embedding = nn.Embedding(num_queries, embed_dim)\n\n # define transformer module\n self.transformer = transformer\n\n # define classification head and box head\n self.num_classes = num_classes\n self.class_embed = nn.Linear(embed_dim, num_classes)\n self.bbox_embed = MLP(embed_dim, embed_dim, 4, 3)\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # define contoller for box refinement and two-stage variants\n self.with_box_refine = with_box_refine\n self.as_two_stage = as_two_stage\n\n # init parameters for heads\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for _, neck_layer in self.neck.named_modules():\n if isinstance(neck_layer, nn.Conv2d):\n nn.init.xavier_uniform_(neck_layer.weight, gain=1)\n nn.init.constant_(neck_layer.bias, 0)\n\n # If two-stage, the last class_embed and bbox_embed is for region proposal generation\n # Decoder layers share the same heads without box refinement, while use the different\n # heads when box refinement is used.\n num_pred = (\n (transformer.decoder.num_layers + 1) if as_two_stage else transformer.decoder.num_layers\n )\n if with_box_refine:\n self.class_embed = nn.ModuleList(\n [copy.deepcopy(self.class_embed) for i in range(num_pred)]\n )\n self.bbox_embed = nn.ModuleList(\n [copy.deepcopy(self.bbox_embed) for i in range(num_pred)]\n )\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n self.transformer.decoder.bbox_embed = self.bbox_embed\n else:\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n self.transformer.decoder.bbox_embed = None\n\n # hack implementation for two-stage. 
The last class_embed and bbox_embed is for region proposal generation\n if as_two_stage:\n self.transformer.decoder.class_embed = self.class_embed\n for box_embed in self.bbox_embed:\n nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)\n\n # set topk boxes selected for inference\n self.select_box_nums_for_evaluation = select_box_nums_for_evaluation\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.num_queries_one2one = num_queries_one2one\n self.mixed_selection = mixed_selection\n self.k_one2many = k_one2many\n self.lambda_one2many = lambda_one2many\n\n # Rank-adaptive Classification Head\n self.rank_adaptive_classhead = rank_adaptive_classhead\n if self.rank_adaptive_classhead:\n self.rank_adaptive_classhead_emb = nn.ModuleList([\n copy.deepcopy(nn.Embedding(self.num_queries, num_classes))\n for _ in range(transformer.decoder.num_layers)\n ])\n for m in self.rank_adaptive_classhead_emb.parameters():\n nn.init.zeros_(m)\n\n def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n # mask padding regions in batched images\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n # disable the one-to-many branch queries\n # save them frist\n save_num_queries = self.num_queries\n save_two_stage_num_proposals = self.transformer.two_stage_num_proposals\n self.num_queries = self.num_queries_one2one\n self.transformer.two_stage_num_proposals = self.num_queries\n\n # original features\n features = self.backbone(images.tensor) # output feature dict\n\n # project backbone features to the reuired dimension of transformer\n # we use multi-scale features in deformable DETR\n multi_level_feats = self.neck(features)\n multi_level_masks = []\n multi_level_position_embeddings = []\n for feat in multi_level_feats:\n multi_level_masks.append(\n F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)\n )\n multi_level_position_embeddings.append(self.position_embedding(multi_level_masks[-1]))\n\n # initialize object query embeddings\n query_embeds = None\n if not self.as_two_stage or self.mixed_selection:\n query_embeds = self.query_embedding.weight[0 : self.num_queries, :]\n\n # make attn mask\n \"\"\" attention mask to prevent information leakage\n \"\"\"\n self_attn_mask = (\n torch.zeros(\n [\n self.num_queries,\n self.num_queries,\n ]\n )\n .bool()\n .to(feat.device)\n )\n self_attn_mask[\n self.num_queries_one2one :,\n 0 : self.num_queries_one2one,\n ] = True\n self_attn_mask[\n 0 : self.num_queries_one2one,\n self.num_queries_one2one :,\n ] = True\n\n (\n inter_states,\n init_reference,\n inter_references,\n enc_outputs_class,\n enc_outputs_coord_unact,\n ) = self.transformer(\n multi_level_feats,\n multi_level_masks,\n multi_level_position_embeddings,\n query_embeds,\n self_attn_mask,\n )\n\n # Calculate output coordinates and classes.\n outputs_classes_one2one = []\n outputs_coords_one2one = []\n outputs_classes_one2many = []\n outputs_coords_one2many = []\n for lvl in range(inter_states.shape[0]):\n if lvl == 0:\n reference 
= init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](inter_states[lvl])\n\n # Rank-adaptive Classification Head\n if self.rank_adaptive_classhead:\n bs, n_query = inter_states[lvl].shape[0], inter_states[lvl].shape[1]\n # bs = batch_size in one gpu; n_query = num_queries_one2one+num_queries_one2many for training, num_queries_one2one for testing\n rank_adaptive_classhead_emb_lvl = self.rank_adaptive_classhead_emb[lvl].weight[:n_query, :].unsqueeze(0).repeat(bs, 1, 1)\n # tensor shape: [bs, n_query, num_classes]\n outputs_class = outputs_class + rank_adaptive_classhead_emb_lvl\n # tensor shape: [bs, n_query, num_classes]\n\n tmp = self.bbox_embed[lvl](inter_states[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes_one2one.append(outputs_class[:, 0 : self.num_queries_one2one])\n outputs_classes_one2many.append(outputs_class[:, self.num_queries_one2one :])\n outputs_coords_one2one.append(outputs_coord[:, 0 : self.num_queries_one2one])\n outputs_coords_one2many.append(outputs_coord[:, self.num_queries_one2one :])\n outputs_classes_one2one = torch.stack(outputs_classes_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, num_classes]\n outputs_coords_one2one = torch.stack(outputs_coords_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, 4]\n outputs_classes_one2many = torch.stack(outputs_classes_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, num_classes]\n outputs_coords_one2many = torch.stack(outputs_coords_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, 4]\n\n # prepare for loss computation\n output = {\n \"pred_logits\": outputs_classes_one2one[-1],\n \"pred_boxes\": outputs_coords_one2one[-1],\n \"pred_logits_one2many\": outputs_classes_one2many[-1],\n \"pred_boxes_one2many\": outputs_coords_one2many[-1],\n }\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(\n outputs_classes_one2one, outputs_coords_one2one\n )\n output[\"aux_outputs_one2many\"] = self._set_aux_loss(\n outputs_classes_one2many, outputs_coords_one2many\n )\n\n if self.as_two_stage:\n enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n output[\"enc_outputs\"] = {\n \"pred_logits\": enc_outputs_class,\n \"pred_boxes\": enc_outputs_coord,\n }\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n if self.k_one2many > 0:\n loss_dict = self.train_hybrid(\n output,\n targets,\n self.k_one2many,\n self.criterion,\n self.lambda_one2many,\n )\n else:\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n new_dict = dict()\n for key, value in weight_dict.items():\n new_dict[key] = value\n new_dict[key + \"_one2many\"] = value\n weight_dict = new_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = 
detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n # recover the model parameters for next training epoch\n self.num_queries = save_num_queries\n self.transformer.two_stage_num_proposals = save_two_stage_num_proposals\n return processed_results\n\n def train_hybrid(self, outputs, targets, k_one2many, criterion, lambda_one2many):\n # one-to-one-loss\n loss_dict = criterion(outputs, targets)\n multi_targets = copy.deepcopy(targets)\n # repeat the targets\n for target in multi_targets:\n target[\"boxes\"] = target[\"boxes\"].repeat(k_one2many, 1)\n target[\"labels\"] = target[\"labels\"].repeat(k_one2many)\n\n outputs_one2many = dict()\n outputs_one2many[\"pred_logits\"] = outputs[\"pred_logits_one2many\"]\n outputs_one2many[\"pred_boxes\"] = outputs[\"pred_boxes_one2many\"]\n outputs_one2many[\"aux_outputs\"] = outputs[\"aux_outputs_one2many\"]\n\n # one-to-many loss\n loss_dict_one2many = criterion(outputs_one2many, multi_targets)\n for key, value in loss_dict_one2many.items():\n if key + \"_one2many\" in loss_dict.keys():\n loss_dict[key + \"_one2many\"] += value * lambda_one2many\n else:\n loss_dict[key + \"_one2many\"] = value * lambda_one2many\n return loss_dict\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # Select top-k confidence boxes for inference\n prob = box_cls.sigmoid()\n topk_values, topk_indexes = torch.topk(\n prob.view(box_cls.shape[0], -1), self.select_box_nums_for_evaluation, dim=1\n )\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, box_cls.shape[2], rounding_mode=\"floor\")\n labels = topk_indexes % box_cls.shape[2]\n\n boxes = torch.gather(box_pred, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n for (\n i,\n (scores_per_image, labels_per_image, box_pred_per_image, image_size),\n ) in enumerate(zip(scores, labels, boxes, image_sizes)):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets\n\n def preprocess_image(self, batched_inputs):\n images = 
[self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images" }, { "identifier": "RankDetrCriterion", "path": "projects/rank_detr/modeling/rankdetr_criterion.py", "snippet": "class RankDetrCriterion(SetCriterion):\n \"\"\"This class computes the loss for Deformable-DETR\n and two-stage Deformable-DETR\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n losses: List[str] = [\"class\", \"boxes\"],\n eos_coef: float = 0.1,\n loss_class_type: str = \"focal_loss\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n GIoU_aware_class_loss: bool = True,\n ):\n super(RankDetrCriterion, self).__init__(\n num_classes=num_classes,\n matcher=matcher,\n weight_dict=weight_dict,\n losses=losses,\n eos_coef=eos_coef,\n loss_class_type=loss_class_type,\n alpha=alpha,\n gamma=gamma,\n )\n self.GIoU_aware_class_loss = GIoU_aware_class_loss\n\n def loss_labels(self, outputs, targets, indices, num_boxes, GIoU_aware_class_loss):\n \"\"\"Classification loss (Binary focal loss)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"]\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2],\n self.num_classes,\n dtype=torch.int64,\n device=src_logits.device,\n )\n target_classes[idx] = target_classes_o\n\n # Computation classification loss\n if self.loss_class_type == \"ce_loss\":\n loss_class = F.cross_entropy(\n src_logits.transpose(1, 2), target_classes, self.empty_weight\n )\n elif self.loss_class_type == \"focal_loss\":\n # src_logits: (b, num_queries, num_classes) = (2, 300, 80)\n # target_classes_one_hot = (2, 300, 80)\n target_classes_onehot = torch.zeros(\n [src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype,\n layout=src_logits.layout,\n device=src_logits.device,\n )\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n\n if GIoU_aware_class_loss:\n # get GIoU-aware classification target: t = (GIoU + 1) / 2\n\n # # get normed GIoU\n bs, n_query = outputs[\"pred_boxes\"].shape[:2]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # tensor shape: [bs * n_query, 4]\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets]) # tensor shape: [gt number within a batch, 4]\n bbox_giou = generalized_box_iou(\n box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)\n ) # tensor shape: [bs * n_query, gt number within a batch]\n bbox_giou_normed = (bbox_giou + 1) / 2.0\n bbox_giou_normed = bbox_giou_normed.reshape(bs, n_query, -1) # tensor shape: [bs, n_query, gt number within a batch]\n\n # # get matched gt indices: gt_indices\n for indices_idx, element in enumerate(indices):\n if indices_idx == 0:\n gt_indices = element[1]\n else:\n curr_length = gt_indices.shape[0]\n gt_indices = torch.cat((gt_indices, element[1] + curr_length), dim=0)\n\n # # get the supervision with a shape of [bs, n_query, num_classes]\n class_supervision = torch.zeros(\n [src_logits.shape[0], src_logits.shape[1]],\n dtype=src_logits.dtype,\n layout=src_logits.layout,\n device=src_logits.device,\n )\n class_supervision[idx] = bbox_giou_normed[(idx[0], idx[1], gt_indices)] # idx[0]: batch idx; idx[1]: query idx; gt_indices: matched gt idx\n class_supervision = 
class_supervision.detach()\n target_classes_onehot_GIoU_aware = target_classes_onehot * class_supervision.unsqueeze(-1)\n\n # sigmoid_focal_loss supervised by target_classes_onehot_GIoU_aware\n src_prob = src_logits.sigmoid()\n\n # # positive samples\n bce_loss_pos = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot_GIoU_aware, reduction=\"none\") * target_classes_onehot\n p_t_pos = torch.abs(target_classes_onehot_GIoU_aware - src_prob * target_classes_onehot) ** self.gamma\n\n # # negative samples\n bce_loss_neg = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction=\"none\") * (1 - target_classes_onehot)\n p_t_neg = torch.abs(src_prob * (1 - target_classes_onehot)) ** self.gamma\n\n # # total loss\n loss = p_t_pos * bce_loss_pos + p_t_neg * bce_loss_neg\n\n if self.alpha >= 0:\n alpha_t = self.alpha * target_classes_onehot + (1 - self.alpha) * (1 - target_classes_onehot)\n loss = alpha_t * loss\n\n loss_class = loss.mean(1).sum() / num_boxes\n loss_class = loss_class * src_logits.shape[1]\n else:\n loss_class = (\n sigmoid_focal_loss(\n src_logits,\n target_classes_onehot,\n num_boxes=num_boxes,\n alpha=self.alpha,\n gamma=self.gamma,\n )\n * src_logits.shape[1]\n )\n losses = {\"loss_class\": loss_class}\n\n return losses\n\n def forward(self, outputs, targets):\n outputs_without_aux = {\n k: v for k, v in outputs.items() if k != \"aux_outputs\" and k != \"enc_outputs\"\n }\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = True if (self.training and self.GIoU_aware_class_loss) else False\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = True if (self.training and self.GIoU_aware_class_loss) else False\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n # Compute losses for two-stage deformable-detr\n if \"enc_outputs\" in outputs:\n enc_outputs = outputs[\"enc_outputs\"]\n bin_targets = copy.deepcopy(targets)\n for bt in bin_targets:\n bt[\"labels\"] = torch.zeros_like(bt[\"labels\"])\n indices = self.matcher(enc_outputs, bin_targets)\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = False\n l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)\n l_dict = {k + \"_enc\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses" }, { "identifier": "HighOrderMatcher", "path": 
"projects/rank_detr/modeling/high_order_matcher.py", "snippet": "class HighOrderMatcher(nn.Module):\n \"\"\"HighOrderMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n iou_order_alpha: float = 4.0,\n matcher_change_iter: int = 67500,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n self.iou_order_alpha = iou_order_alpha\n self.iter = -1\n self.matcher_change_iter = matcher_change_iter\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n if self.iter < self.matcher_change_iter:\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n else:\n # high-order matching cost\n # # Compute the class_score\n class_score = out_prob[:, tgt_ids] # shape = [batch_size * num_queries, gt num within a batch]\n\n # # Compute iou\n bbox_iou, _ = box_iou(\n box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)\n ) # shape = [batch_size * num_queries, gt num within a batch]\n\n # Final cost matrix\n C = (-1) * (\n class_score * torch.pow(bbox_iou, self.iou_order_alpha)\n )\n\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" } ]
import torch.nn as nn
from detectron2.modeling.backbone import ResNet, BasicStem
from detectron2.layers import ShapeSpec
from detectron2.config import LazyCall as L
from detrex.modeling.matcher import HungarianMatcher
from detrex.modeling.neck import ChannelMapper
from detrex.layers import PositionEmbeddingSine
from projects.rank_detr.modeling import (
    RankDETR,
    RankDetrTransformerEncoder,
    RankDetrTransformerDecoder,
    RankDetrTransformer,
    RankDetrCriterion,
    HighOrderMatcher,
)
18,632
model = L(RankDETR)(
    backbone=L(ResNet)(
        stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
        stages=L(ResNet.make_default_stages)(
            depth=50,
            stride_in_1x1=False,
            norm="FrozenBN",
        ),
        out_features=["res3", "res4", "res5"],
        freeze_at=1,
    ),
    position_embedding=L(PositionEmbeddingSine)(
        num_pos_feats=128,
        temperature=10000,
        normalize=True,
        offset=-0.5,
    ),
    neck=L(ChannelMapper)(
        input_shapes={
            "res3": ShapeSpec(channels=512),
            "res4": ShapeSpec(channels=1024),
            "res5": ShapeSpec(channels=2048),
        },
        in_features=["res3", "res4", "res5"],
        out_channels=256,
        num_outs=4,
        kernel_size=1,
        norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256),
    ),
    transformer=L(RankDetrTransformer)(
        encoder=L(RankDetrTransformerEncoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            post_norm=False,
            use_checkpoint=False,
            num_feature_levels="${..num_feature_levels}",
        ),
        decoder=L(RankDetrTransformerDecoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            return_intermediate=True,
            num_feature_levels="${..num_feature_levels}",
            use_checkpoint=False,
            look_forward_twice=True,
            num_queries_one2one="${..num_queries_one2one}",
            num_queries_one2many="${..num_queries_one2many}",
            two_stage_num_proposals="${..two_stage_num_proposals}",
            rank_adaptive_classhead="${..rank_adaptive_classhead}",
            query_rank_layer=True,
        ),
        as_two_stage="${..as_two_stage}",
        num_feature_levels=4,
        num_queries_one2one="${..num_queries_one2one}",
        num_queries_one2many="${..num_queries_one2many}",
        two_stage_num_proposals=1800,
        mixed_selection=True,
        rank_adaptive_classhead="${..rank_adaptive_classhead}",
    ),
    embed_dim=256,
    num_classes=80,
    num_queries_one2one=300,
    num_queries_one2many=1500,
    aux_loss=True,
    with_box_refine=False,
    as_two_stage=False,
model = L(RankDETR)(
    backbone=L(ResNet)(
        stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
        stages=L(ResNet.make_default_stages)(
            depth=50,
            stride_in_1x1=False,
            norm="FrozenBN",
        ),
        out_features=["res3", "res4", "res5"],
        freeze_at=1,
    ),
    position_embedding=L(PositionEmbeddingSine)(
        num_pos_feats=128,
        temperature=10000,
        normalize=True,
        offset=-0.5,
    ),
    neck=L(ChannelMapper)(
        input_shapes={
            "res3": ShapeSpec(channels=512),
            "res4": ShapeSpec(channels=1024),
            "res5": ShapeSpec(channels=2048),
        },
        in_features=["res3", "res4", "res5"],
        out_channels=256,
        num_outs=4,
        kernel_size=1,
        norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256),
    ),
    transformer=L(RankDetrTransformer)(
        encoder=L(RankDetrTransformerEncoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            post_norm=False,
            use_checkpoint=False,
            num_feature_levels="${..num_feature_levels}",
        ),
        decoder=L(RankDetrTransformerDecoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            return_intermediate=True,
            num_feature_levels="${..num_feature_levels}",
            use_checkpoint=False,
            look_forward_twice=True,
            num_queries_one2one="${..num_queries_one2one}",
            num_queries_one2many="${..num_queries_one2many}",
            two_stage_num_proposals="${..two_stage_num_proposals}",
            rank_adaptive_classhead="${..rank_adaptive_classhead}",
            query_rank_layer=True,
        ),
        as_two_stage="${..as_two_stage}",
        num_feature_levels=4,
        num_queries_one2one="${..num_queries_one2one}",
        num_queries_one2many="${..num_queries_one2many}",
        two_stage_num_proposals=1800,
        mixed_selection=True,
        rank_adaptive_classhead="${..rank_adaptive_classhead}",
    ),
    embed_dim=256,
    num_classes=80,
    num_queries_one2one=300,
    num_queries_one2many=1500,
    aux_loss=True,
    with_box_refine=False,
    as_two_stage=False,
criterion=L(RankDetrCriterion)(
7
2023-10-12 03:02:25+00:00
24k
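This record's next_line, criterion=L(RankDetrCriterion)(, opens the criterion block that the cropped config above stops just short of. A minimal sketch of how such a block can be written, using only the RankDetrCriterion and HighOrderMatcher constructor signatures shown in the context; the weight_dict values are illustrative assumptions, not the repository's actual settings:

from detectron2.config import LazyCall as L
from projects.rank_detr.modeling import RankDetrCriterion, HighOrderMatcher

criterion = L(RankDetrCriterion)(
    num_classes=80,
    matcher=L(HighOrderMatcher)(
        cost_class=1.0,                  # defaults taken from the snippet's signature
        cost_bbox=1.0,
        cost_giou=1.0,
        cost_class_type="focal_loss_cost",
        alpha=0.25,
        gamma=2.0,
        iou_order_alpha=4.0,
        matcher_change_iter=67500,
    ),
    weight_dict={                        # assumed DETR-style loss weights, for illustration only
        "loss_class": 2.0,
        "loss_bbox": 5.0,
        "loss_giou": 2.0,
    },
    losses=["class", "boxes"],
    loss_class_type="focal_loss",
    alpha=0.25,
    gamma=2.0,
    GIoU_aware_class_loss=True,
)

With LazyConfig, L(...) only records the call and its arguments; nothing is built until detectron2.config.instantiate is applied to the config node.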
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
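The `_generate_tokens` snippet in the context above handles clips longer than `max_duration` with a sliding window: every pass regenerates a full window of tokens but only advances by `extend_stride` seconds, reusing the tail of the previous window as the prompt for the next pass. The helper below is a minimal sketch of just that stride arithmetic; `generation_windows` is a hypothetical name and not part of the record, but it is handy for checking how many LM passes a given duration implies.

# Hypothetical helper (not in the repository above): reproduces the window/stride
# bookkeeping of the _generate_tokens loop when duration > max_duration.
def generation_windows(duration: float, max_duration: float,
                       extend_stride: float, frame_rate: int):
    """Yield (time_offset_seconds, max_gen_len_tokens) for each LM generation pass."""
    assert extend_stride < max_duration, "stride must be shorter than the window"
    total_gen_len = int(duration * frame_rate)       # tokens for the whole clip
    stride_tokens = int(frame_rate * extend_stride)  # tokens the window advances per pass
    current_gen_offset = 0
    prompt_length = 0
    while current_gen_offset + prompt_length < total_gen_len:
        time_offset = current_gen_offset / frame_rate
        chunk_duration = min(duration - time_offset, max_duration)
        max_gen_len = int(chunk_duration * frame_rate)
        yield time_offset, max_gen_len
        # the tokens after the first `stride_tokens` of a pass become the next prompt
        prompt_length = max_gen_len - stride_tokens
        current_gen_offset += stride_tokens

# e.g. a 60 s clip with a 30 s window, 18 s stride, 50 Hz token rate -> 3 passes
print(list(generation_windows(60, 30, 18, 50)))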
import os
import random
import torchaudio
import typing as tp
import numpy as np
import torch
import librosa
import subprocess
import math
import allin1
import pytsmod as tsm
import shutil
from typing import Optional
from cog import BasePredictor, Input, Path
from audiocraft.models import MusicGen, MultiBandDiffusion
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models.loaders import (
    load_compression_model,
    load_lm_model,
)
from audiocraft.data.audio import audio_write
from audiocraft.models.builders import get_lm_model
from omegaconf import OmegaConf
from audiocraft.modules.btc.btc_model import BTC_model
from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord
from demucs.audio import convert_audio
from demucs.apply import apply_model
14,818
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.

MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports
def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)

def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
    compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
    return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm)

class Predictor(BasePredictor):
    def setup(self, weights: Optional[Path] = None):
        """Load the model into memory to make running multiple predictions efficient"""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mbd = MultiBandDiffusion.get_mbd_musicgen()

    def _load_model(
        self,
        model_path: str,
        cls: Optional[any] = None,
        load_args: Optional[dict] = {},
        model_id: Optional[str] = None,
        device: Optional[str] = None,
    ) -> MusicGen:
        if device is None:
            device = self.device
        compression_model = load_compression_model(
            model_id, device=device, cache_dir=model_path
        )
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.

MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports
def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)

def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
    compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
    return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm)

class Predictor(BasePredictor):
    def setup(self, weights: Optional[Path] = None):
        """Load the model into memory to make running multiple predictions efficient"""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mbd = MultiBandDiffusion.get_mbd_musicgen()

    def _load_model(
        self,
        model_path: str,
        cls: Optional[any] = None,
        load_args: Optional[dict] = {},
        model_id: Optional[str] = None,
        device: Optional[str] = None,
    ) -> MusicGen:
        if device is None:
            device = self.device
        compression_model = load_compression_model(
            model_id, device=device, cache_dir=model_path
        )
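Both code fields above define `_delete_param`, which walks an OmegaConf config along a dotted key, temporarily lifts struct mode on the parent node, and deletes the leaf only if every intermediate key exists. The short illustration below assumes the `_delete_param` definition above is in scope; the config keys are invented for the example.

# Illustration only: exercising the _delete_param helper defined in the code above
# on a throwaway config (keys here are made up for the example).
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {"conditioners": {"args": {"drop_desc_p": 0.5, "keep_me": 1}}}
)
OmegaConf.set_struct(cfg, True)

_delete_param(cfg, "conditioners.args.drop_desc_p")    # leaf removed
_delete_param(cfg, "conditioners.missing.cache_path")  # absent path: silently ignored

print(OmegaConf.to_yaml(cfg))
# conditioners:
#   args:
#     keep_me: 1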
lm = load_lm_model(model_id, device=device, cache_dir=model_path)
4
2023-10-09 09:55:24+00:00
24k
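For reference, the `audio_write` helper captured in the context of the record above normalizes the tensor according to the chosen strategy, pipes it to ffmpeg, and handles the format suffix and parent directory itself. The usage sketch below is illustrative only: the output stem and the synthetic test tone are made up, and ffmpeg must be available on the PATH.

# Minimal usage sketch for the audio_write helper shown in this record's context.
import torch
from audiocraft.data.audio import audio_write

sr = 32000
t = torch.arange(sr) / sr
wav = 0.5 * torch.sin(2 * torch.pi * 440 * t).unsqueeze(0)  # 1 s, mono, float32

out_path = audio_write(
    "outputs/demo_tone", wav, sr,
    format="wav", strategy="peak", peak_clip_headroom_db=1,
)
print(out_path)  # -> outputs/demo_tone.wav (suffix and parent dir handled for you)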
oracle/guardian-ai
tests/unitary/test_fairness_metrics.py
[ { "identifier": "ConsistencyScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class ConsistencyScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the consistency of a dataset.\n\n Consistency is measured as the number of ratio of instances that have a\n different label from the k=5 nearest neighbors.\n\n Perfect score\n A perfect score for this metric is 0, meaning that the dataset does\n not have different labels for instances that are similar to one another.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ConsistencyScorer\n scorer = ConsistencyScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(self, protected_attributes: Union[pd.Series, np.ndarray, List, str]):\n super().__init__(protected_attributes=protected_attributes, metric=consistency)" }, { "identifier": "DatasetStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class DatasetStatisticalParityScorer(_DatasetFairnessScorer):\n \"\"\"\n Measures the statistical parity [1] of a dataset. Statistical parity (also\n known as Base Rate or Disparate Impact) for a dataset states that a dataset\n is unbiased if the label is independent of the protected attribute.\n\n For each subgroup, statistical parity is computed as the ratio of positive\n labels in a subgroup.\n\n Statistical Parity (also known as Base Rate or Disparate Impact) is\n calculated as PL / N, where PL and N are the number of Positive Labels and\n total number of instances, respectively.\n\n Perfect score\n A perfect score for this metric means that the dataset does not have\n a different ratio of positive labels for a subgroup than it does for\n the rest of the subgroups. For example, if the protected attributes\n are race and sex, then a perfect statistical parity would mean that\n all combinations of values for race and sex have identical ratios of\n positive labels. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import DatasetStatisticalParityScorer\n scorer = DatasetStatisticalParityScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=dataset_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "SmoothedEDFScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class SmoothedEDFScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the smoothed Empirical Differential Fairness (EDF) of a dataset, as\n proposed by Foulds et al. [1].\n\n Smoothed EDF returns the minimal exponential deviation of positive target\n ratios comparing a subgroup to the rest of the subgroups.\n\n This metric is related to :class:`.DatasetStatisticalParity` with\n `reduction='max'` and `distance_measure='ratio'`, with the only difference\n being that :class:`.SmoothedEDFScorer` returns a logarithmic value instead.\n\n Perfect score\n A perfect score for this metric is 0, meaning that the dataset does\n not have a different ratio of positive labels for a subgroup than\n it does for the rest of the subgroups. For example, if the\n protected attributes are race and sex, then a perfect smoothed EDF\n would mean that all combinations of values for race and sex have\n identical ratios of positive labels.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n\n References\n ----------\n [1] `Foulds, James R., et al. \"An intersectional definition of fairness.\"\n 2020 IEEE 36th International Conference on Data Engineering (ICDE).\n IEEE, 2020. <https://arxiv.org/abs/1807.08362>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import SmoothedEDFScorer\n scorer = SmoothedEDFScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(self, protected_attributes: Union[pd.Series, np.ndarray, List, str]):\n super().__init__(protected_attributes=protected_attributes, metric=smoothed_edf)" }, { "identifier": "consistency", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def consistency(y_true: Union[pd.Series, np.ndarray, List], subgroups: pd.DataFrame):\n \"\"\"\n Measures the consistency of a dataset.\n\n For more details, refer to :class:`.ConsistencyScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import consistency\n subgroups = X[['race', 'sex']]\n consistency(y_true, subgroups)\n \"\"\"\n # Need to read with [0] because consistency returns an array of size 1.\n return _simple_dataset_metric(y_true, subgroups, metric=\"consistency\")[0]" }, { "identifier": "dataset_statistical_parity", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def dataset_statistical_parity(\n y_true: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: str = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the statistical parity of a dataset.\n\n For more details, refer to :class:`.DatasetStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import dataset_statistical_parity\n subgroups = X[['race', 'sex']]\n dataset_statistical_parity(y_true, subgroups)\n \"\"\"\n return _dataset_metric(\n y_true,\n subgroups,\n metric=\"base_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "smoothed_edf", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def smoothed_edf(y_true: Union[pd.Series, np.ndarray, List], subgroups: pd.DataFrame):\n \"\"\"\n Measures the smoothed Empirical Differential Fairness (EDF) of a dataset, as\n proposed by Foulds et al. [1].\n\n For more details, refer to :class:`.SmoothedEDFScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n\n References\n ----------\n [1] `Foulds, James R., et al. \"An intersectional definition of fairness.\"\n 2020 IEEE 36th International Conference on Data Engineering (ICDE).\n IEEE, 2020. <https://arxiv.org/abs/1807.08362>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import smoothed_edf\n subgroups = X[['race', 'sex']]\n smoothed_edf(y_true, subgroups)\n \"\"\"\n return _simple_dataset_metric(\n y_true, subgroups, metric=\"smoothed_empirical_differential_fairness\"\n )" }, { "identifier": "EqualizedOddsScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class EqualizedOddsScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n The disparity is measured by comparing the true positive and false positive\n rates on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n False Positive Rate (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Equalized Odds [1] is computed by taking the maximum distance between\n TPR and FPR for a subgroup against the rest of the subgroups.\n\n Perfect score\n A perfect score for this metric means that the model has the same TPR and\n FPR when comparing a subgroup to the rest of the subgroups. For example,\n if the protected attributes are race and sex, then a perfect\n Equalized Odds disparity would mean that all combinations of values for\n race and sex have identical TPR and FPR. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. \"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import EqualizedOddsScorer\n scorer = EqualizedOddsScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=equalized_odds,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ErrorRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ErrorRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the error rate on\n instances of a subgroup against the rest of the subgroups.\n\n Error Rate (also known as inaccuracy) is calculated as\n (FP + FN) / N, where FP and FN are the number of false positives and\n false negatives, respectively, while N is the total Number of\n instances.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect error rate disparity would\n mean that all combinations of values for race and sex have identical\n error rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import ErrorRateScorer\n scorer = ErrorRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=error_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseDiscoveryRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseDiscoveryRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n discovery rate on instances of a subgroup against the rest of the\n subgroups.\n\n False Discovery Rate (also known as FDR) is calculated as\n FP / (FP + TP), where FP and TP are the number of false positives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false discovery rate disparity\n would mean that all combinations of values for race and sex have identical\n false discovery rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseDiscoveryRateScorer\n scorer = FalseDiscoveryRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_discovery_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseNegativeRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseNegativeRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n negative rate on instances of a subgroup against the rest of the subgroups.\n\n False Negative Rate [1] (also known as FNR or miss rate) is calculated as\n FN / (FN + TP), where FN and TP are the number of false negatives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false negative rate disparity\n would mean that all combinations of values for race and sex have identical\n false negative rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseNegativeRateScorer\n scorer = FalseNegativeRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_negative_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseOmissionRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseOmissionRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n omission rate on instances of a subgroup against the rest of the subgroups.\n\n False Omission Rate (also known as FOR) is calculated as\n FN / (FN + TN), where FN and TN are the number of false negatives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false omission rate disparity\n would mean that all combinations of values for race and sex have identical\n false omission rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseOmissionRateScorer\n scorer = FalseOmissionRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_omission_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalsePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalsePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n positive rate on instances of a subgroup against the rest of the subgroups.\n\n False Positive Rate [1] (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false positive rate disparity\n would mean that all combinations of values for race and sex have identical\n false positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalsePositiveRateScorer\n scorer = FalsePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ModelStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ModelStatisticalParityScorer(_ModelFairnessScorer): # noqa: D412\n \"\"\"\n Measure the statistical parity [1] of a model's output between all subgroup pairs.\n\n Statistical parity (also known as Base Rate or Disparate Impact) states that\n a predictor is unbiased if the prediction is independent of the protected\n attribute.\n\n Statistical Parity is calculated as PP / N, where PP and N are the number of\n Positive Predictions and total Number of predictions made, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not predict\n positively any of the subgroups at a different rate than it does for the\n rest of the subgroups. For example, if the protected attributes are race\n and sex, then a perfect statistical parity would mean that all combinations\n of values for race and sex have identical ratios of positive predictions.\n Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ModelStatisticalParityScorer\n\n scorer = ModelStatisticalParityScorer(['race', 'sex'])\n scorer(model, X, y_true)\n\n This metric does not require `y_true`. It can also be called using\n\n .. 
code-block:: python\n\n scorer(model, X)\n \"\"\" # noqa: D412\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=model_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )\n\n def __call__(\n self,\n model: Any,\n X: pd.DataFrame,\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n supplementary_features: Optional[pd.DataFrame] = None,\n ):\n \"\"\"\n Compute the metric using a model's predictions on a given array\n of instances ``X``.\n\n Parameters\n ----------\n model: Any\n Object that implements a `predict(X)` function to collect\n categorical predictions.\n X : pandas.DataFrame\n Array of instances to compute the metric on.\n y_true : pandas.Series, numpy.ndarray, list, or None, default=None\n Array of groundtruth labels.\n supplementary_features : pandas.DataFrame, or None, default=None\n Array of supplementary features for each instance. Used in case\n one attribute in ``self.protected_attributes`` is not contained by\n ``X`` (e.g. if the protected attribute is not used by the model).\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to ``self.reduction``.\n\n\n Raises\n ------\n GuardianAIValueError\n - if a feature is present in both ``X``\n and ``supplementary_features``.\n\n \"\"\"\n y_pred = model.predict(X)\n\n subgroups = self._get_check_subgroups(X, supplementary_features)\n\n return self.metric(\n y_true, y_pred, subgroups, self.distance_measure, self.reduction\n )" }, { "identifier": "TheilIndexScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TheilIndexScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n Intuitively, the Theil Index can be thought of as a measure of the\n divergence between a subgroup's different error distributions (i.e. false\n positives and false negatives) against the rest of the subgroups.\n\n Perfect score\n The perfect score for this metric is 0, meaning that the model does not\n have a different error distribution for any subgroup when compared to the\n rest of the subgroups. For example, if the protected attributes are\n race and sex, then a perfect Theil Index disparity would mean that all\n combinations of values for race and sex have identical error\n distributions.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TheilIndexScorer\n scorer = TheilIndexScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=theil_index,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=True,\n )" }, { "identifier": "TruePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TruePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive rate between\n all subgroup pairs (also known as equal opportunity).\n\n For each subgroup, the disparity is measured by comparing the true positive\n rate on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate [1] (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n\n Perfect score\n A perfect score for this metric means that the model does not correctly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect true positive rate disparity\n would mean that all combinations of values for race and sex have\n identical true positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. 
\"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TruePositiveRateScorer\n scorer = TruePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=true_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "equalized_odds", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def equalized_odds(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n For more details, refer to :class:`.EqualizedOddsScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import equalized_odds\n subgroups = X[['race', 'sex']]\n equalized_odds(y_true, y_pred, subgroups)\n \"\"\"\n tpr = true_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n\n fpr = false_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n if isinstance(tpr, dict):\n eq_odds = {}\n for key in tpr:\n eq_odds[key] = np.nanmax([tpr[key], fpr[key]])\n else:\n eq_odds = np.nanmax([tpr, fpr])\n\n return eq_odds" }, { "identifier": "error_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def error_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For more details, refer to :class:`.ErrorRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import error_rate\n subgroups = X[['race', 'sex']]\n error_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"error_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_discovery_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_discovery_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseDiscoveryRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_discovery_rate\n subgroups = X[['race', 'sex']]\n false_discovery_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_discovery_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_negative_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_negative_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseNegativeRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import false_negative_rate\n subgroups = X[['race', 'sex']]\n false_negative_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_negative_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_omission_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_omission_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseOmissionRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_omission_rate\n subgroups = X[['race', 'sex']]\n false_omission_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_omission_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalsePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_positive_rate\n subgroups = X[['race', 'sex']]\n false_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "model_statistical_parity", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def model_statistical_parity(\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n y_pred: Optional[Union[pd.Series, np.ndarray, List]] = None,\n subgroups: Optional[pd.DataFrame] = None,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measure the statistical parity of a model's output between all subgroup pairs.\n\n For more details, refer to :class:`.ModelStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list or None, default=None\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list or None, default=None\n Array of model predictions.\n subgroups : pandas.DataFrame or None, default=None\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If Value of None is received for either `y_pred` or `subgroups`.\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import model_statistical_parity\n subgroups = X[['race', 'sex']]\n model_statistical_parity(y_true, y_pred, subgroups)\n\n This metric does not require `y_true`. It can also be called using\n\n .. code-block:: python\n\n model_statistical_parity(None, y_pred, subgroups)\n model_statistical_parity(y_pred=y_pred, subgroups=subgroups)\n \"\"\" # noqa: D412\n\n if y_pred is None or subgroups is None:\n raise GuardianAIValueError(\n \"Value of None was received for either `y_pred` or `subgroups`. \"\n \"This may be due to calling the metric using only 2 positional \"\n \"arguments. 
If this is the case, either call the function by \"\n \"passing ``None`` as the first argument or use named arguments for \"\n \"`y_pred` and `subgroups`.\"\n )\n\n return _model_metric(\n None,\n y_pred,\n subgroups,\n metric=\"selection_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=True,\n allow_distance_measure_none=False,\n )" }, { "identifier": "theil_index", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def theil_index(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n For more details, refer to :class:`.TheilIndexScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If distance_measure values are given to Theil Index.\n\n References\n ----------\n [1]: `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import theil_index\n subgroups = X[['race', 'sex']]\n theil_index(y_true, y_pred, subgroups)\n \"\"\"\n\n if distance_measure is not None and not isinstance(\n distance_measure, _DistanceMetric\n ):\n raise GuardianAIValueError(\n \"Theil Index does not accept distance_measure values. 
It should\"\n \"always be set to ``None``.\"\n )\n\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"between_group_theil_index\",\n distance_measure=None,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=True,\n )" }, { "identifier": "true_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def true_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.TruePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import true_positive_rate\n subgroups = X[['race', 'sex']]\n true_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"true_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "GuardianAITypeError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAITypeError(TypeError, GuardianAIError):\n \"\"\"Exception raised for generic type issues.\"\"\"\n\n pass" }, { "identifier": "GuardianAIValueError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAIValueError(ValueError, GuardianAIError):\n \"\"\"Exception raised for unexpected values.\"\"\"\n\n pass" }, { "identifier": "get_dummy_dataset", "path": "tests/utils.py", "snippet": "def get_dummy_dataset(\n n_samples=5000,\n n_features=10,\n n_classes=2,\n types=[str, float, bool, int],\n content=[],\n contain_null=False,\n null_ratio=0.3,\n dtime_types=[],\n tz_aware=False,\n reg_range=10.0,\n cat_range=30,\n random_seed=9999,\n imb_factor=1.0,\n task=\"classification\",\n **kwargs,\n):\n \"\"\"\n Generates a dummy dataset and returns its corresponding ope/oml\n dataframe:\n dataset shape n_samples x n_features.\n\n types: column types you wish to generate (random number of columns=\n n_features types are generated, with at least one of each type).\n\n content: list of tuples (dtype, feature) specifying bad column\n features. 
Features can be 'const' - to make all values in column\n constant, or value between 0 and 1 which indicates percentage of\n missing values in a column\n\n dtime_types: datetime column types to generate. Acceptable types\n are: ['datetime', 'date', 'time', 'timedelta', 'datetimetz']\n\n n_classes: number of target classes (only used for classification)\n\n reg_range: range of target for regression datasets, not used for\n classification\n\n cat_range: maximum number of unique values for the categorical\n features\n\n imb_factor: ~ class_ratio = minority_class_size/majority_class_size\n approximately controls dataset target imbalance\n (only used for classification).\n\n \"\"\"\n np.random.seed(random_seed)\n allowed_dtime_types = [\n \"datetime\",\n \"date\",\n \"time\",\n \"timedelta\",\n \"datetimez\",\n \"Timestamp\",\n ]\n\n # sanity checks\n assert (\n n_samples >= n_classes\n ), \"Number of samples has to be greater than num of classes\"\n assert (imb_factor > 0) and (\n imb_factor <= 1.0\n ), \"imb_factor has to be in range of (0, 1.0]\"\n assert len(types) == len(set(types)), \"types inside the list must be unique\"\n assert len(dtime_types) == len(\n set(dtime_types)\n ), \"dtime_types inside the list must be unique\"\n assert (\n len(dtime_types) + len(types) <= n_features\n ), \"provided number of feature types is more than n_features\"\n assert task in [\n \"classification\",\n \"regression\",\n \"anomaly_detection\",\n ], \"Task must be one of classification or regression\"\n assert all(\n x for x in dtime_types if x in allowed_dtime_types\n ), \"dtime_types: {} outside of allowed: {}\".format(dtime_types, allowed_dtime_types)\n\n extra_types, extra_feats, extra_cols = [], [], 0\n if content != []:\n extra_cols = len(content)\n extra_types = [x for x, _ in content]\n extra_feats = [x for _, x in content]\n\n # target labels for the dataset\n if task == \"classification\" or task == \"anomaly_detection\":\n # assign class counts based on geometric distribution of classes based on imb_factor\n class_weights = np.geomspace(imb_factor, 1.0, num=n_classes)\n class_counts = [\n max(1, int(n_samples * x / np.sum(class_weights))) for x in class_weights\n ]\n class_excess = np.sum(class_counts) - n_samples\n class_counts[-1] -= class_excess\n\n # create labels based on class counts and shuffle them\n y = np.hstack(\n [np.full((1, count), cl) for cl, count in enumerate(class_counts)]\n ).ravel()\n np.random.shuffle(y.astype(int))\n y = y.tolist()\n elif task == \"regression\":\n # noise between (-reg_range/2, reg_range/2) for regression\n y = reg_range * np.random.random(size=(1, n_samples, 1)) + reg_range / 2.0\n y = y.reshape(1, n_samples).ravel().tolist()\n\n # tally total number of features\n all_feat_types = types + dtime_types + extra_types\n total_feat_types = len(types) + len(dtime_types)\n if total_feat_types > 0:\n feat_col_types = np.random.choice(\n range(0, total_feat_types), size=n_features - total_feat_types\n ).tolist()\n feat_col_types += list(\n range(0, total_feat_types)\n ) # to ensure at least one of each type\n\n else:\n feat_col_types = []\n feat_col_types += list(range(total_feat_types, total_feat_types + len(extra_types)))\n features = []\n col_types = []\n tz = {}\n # extra_features provided in content, and certain datetime columns are handled differently\n # they get added as pandas Series or DataFrames to rest of features in the end\n special_cols_num, special_pd_df = [], []\n extra_features = pd.DataFrame()\n for i, t in enumerate(feat_col_types):\n assert t 
< total_feat_types + len(extra_types)\n typ = all_feat_types[t]\n if typ is str:\n high_val = np.random.randint(3, cat_range)\n feat = np.random.randint(0, high_val, size=n_samples).tolist()\n feat = [\"STR{}\".format(val) for val in feat]\n elif typ is int:\n low_val = np.random.randint(-50000, -10)\n high_val = np.random.randint(10, 50000)\n feat = np.random.randint(low_val, high_val, size=n_samples).tolist()\n elif typ is float:\n feat = np.random.rand(n_samples).tolist()\n elif typ is bool:\n feat = np.random.randint(0, 2, size=n_samples).tolist()\n feat = [bool(val) for val in feat]\n elif typ in allowed_dtime_types:\n if typ == \"datetime\":\n # generating random datetime\n deltas = random.sample(range(1, 172800000), n_samples)\n d1 = datetime.datetime.now() - datetime.timedelta(days=2000)\n d2 = datetime.datetime.now()\n generated_datetime = []\n for d in deltas:\n generated_datetime.append(d1 + datetime.timedelta(seconds=d))\n feat = generated_datetime\n elif typ == \"timedelta\":\n feat = n_samples * [datetime.timedelta()]\n elif typ == \"time\":\n feat = n_samples * [datetime.time()]\n elif typ == \"date\":\n feat = n_samples * [datetime.date(2019, 9, 11)]\n elif typ == \"datetimez\":\n special_cols_num.append(i)\n special_pd_df.append(\n pd.date_range(start=0, periods=n_samples, tz=\"UTC\")\n )\n feat = n_samples * [\n datetime.date(2019, 9, 11)\n ] # needs to be handled in special way b/c it's already pandas obj\n else:\n raise Exception(\"Unrecognized datetime type of column\")\n else:\n raise Exception(\"Unrecognized type of column\")\n\n # If index reached the last extra_col number of feature types, start modifying features\n # and adding them to extra_features DataFrame instead of list of features\n if extra_cols > 0 and i >= (len(feat_col_types) - extra_cols):\n feat_idx = i - (len(feat_col_types) - extra_cols)\n if isinstance(extra_feats[feat_idx], numbers.Number):\n # missing values given by extra_feats[feat_idx] percentage of instances\n assert (\n extra_feats[feat_idx] <= 1.0 and extra_feats[feat_idx] >= 0\n ), \"feature in content has to be ratio between 0 and 1\"\n ids = np.random.choice(\n range(0, n_samples), size=int(extra_feats[feat_idx] * n_samples)\n ).astype(int)\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat[ids] = np.nan\n elif extra_feats[feat_idx] == \"const\":\n # constant column, set all rows to be same as the first instance\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat = feat[0]\n extra_features[i] = feat\n else: # add features to the list\n features.append(feat)\n col_types.append(type(feat[0]).__name__)\n\n # if task == 'regression':\n # # Add scaled target column for regression so that score is positive\n # features.append([-0.5*x for x in y])\n # col_types.append('float') # target column type is int\n\n # Add target column and convert all types to pandas dtypes\n features.append(y)\n col_types.append(\n \"int\" if task == \"classification\" else \"float\"\n ) # target column type is int\n pd_col_types = map_col_types(col_types)\n pd_df = pd.DataFrame(features).T # transpose to get samples x features\n num_feats = len(features) - 1\n columns = list(range(0, num_feats)) if num_feats > 0 else []\n columns = columns + [\"target\"]\n pd_df.columns = columns # rename columns\n\n # handle special column from datettime: replace placeholder with pandas.date_range columns\n for i, col in 
enumerate(special_cols_num):\n pd_df[col] = special_pd_df[i]\n pd_col_types[col] = pd_df.dtypes[col]\n\n # assign datatypes to pd dataframe for non-datetime types\n columns_types_all = list(zip(columns, pd_col_types))\n columns_types_nodtime = [\n (name, typ)\n for (name, typ) in columns_types_all\n if typ not in allowed_dtime_types\n ]\n columns_types_dtime = [\n (name, typ) for (name, typ) in columns_types_all if typ in allowed_dtime_types\n ]\n pd_df = pd_df.astype(dict(columns_types_nodtime)) # cast types on non-dtime columns\n\n # assign datatypes to pd dataframe only for datetime types\n for col, col_type in columns_types_dtime:\n if col_type == \"timedelta\":\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n elif col_type == \"datetimez\":\n pd_df[col] = pd_df[col]\n elif col_type == \"datetime\":\n pd_df[col] = pd.to_datetime(pd_df[col], errors=\"coerce\")\n if contain_null:\n pd_df[col] = generate_null(pd_df[col], null_ratio)\n if tz_aware:\n tz[str(col)] = pytz.all_timezones[\n np.random.randint(len(pytz.all_timezones))\n ]\n else:\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n\n # add extra features columns that were provided by content\n pd_df[pd_df.shape[1] + extra_features.columns] = extra_features\n\n # Convert all the column names to string type (mainly for FS min_features [] tests)\n pd_df.columns = [str(col) for col in pd_df.columns]\n\n if tz_aware:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"], tz\n else:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"]" } ]
import math import numpy as np import pandas as pd import pytest import sklearn from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import OneHotEncoder from guardian_ai.fairness.metrics.dataset import ( ConsistencyScorer, DatasetStatisticalParityScorer, SmoothedEDFScorer, consistency, dataset_statistical_parity, smoothed_edf, ) from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError from tests.utils import get_dummy_dataset
21,103
# Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") # Do not accept None distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure=None) else: # Accepts only None as distance_measure X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure=None), float ) with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", DATASET_SCORERS_USING_DISTANCE) def test_dataset_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="something") with pytest.raises(GuardianAIValueError): subgroup_scorer(target, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS) def test_model_scorers_y_format(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model run_y_true_tests = scorer not in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Accept same number of instances y_pred = 
target.copy() assert isinstance(scorer(target, y_pred, subgroups), float) # Do not accept different number of instances if run_y_true_tests: y_pred = target[:-1].copy() with pytest.raises(GuardianAIValueError): scorer(target, y_pred, subgroups) # Do not accept multiclass classification multiclass_y = target.copy() multiclass_y[:3] = 2 # Change a few labels assert multiclass_y.nunique() == 3 # Sanity check with pytest.raises(GuardianAIValueError): scorer(multiclass_y, y_pred, subgroups) # Do not accept non-array inputs if run_y_true_tests: y_pred = target.copy() bad_target = {i: itm for i, itm in enumerate(target)}
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer, "false_positive_rate_scorer": FalsePositiveRateScorer, "false_negative_rate_scorer": FalseNegativeRateScorer, "false_omission_rate_scorer": FalseOmissionRateScorer, "false_discovery_rate_scorer": FalseDiscoveryRateScorer, "error_rate_scorer": ErrorRateScorer, "equalized_odds_scorer": EqualizedOddsScorer, "theil_index_scorer": TheilIndexScorer, } MODEL_SUBGROUPS_SCORERS = { "model_statistical_parity_scorer": model_statistical_parity, "true_positive_rate_scorer": true_positive_rate, "false_positive_rate_scorer": false_positive_rate, "false_negative_rate_scorer": false_negative_rate, "false_omission_rate_scorer": false_omission_rate, "false_discovery_rate_scorer": false_discovery_rate, "error_rate_scorer": error_rate, "equalized_odds_scorer": equalized_odds, "theil_index_scorer": theil_index, } MODEL_SCORERS_ALLOWING_REDUCTION = list(MODEL_X_Y_SCORERS.keys()) MODEL_SCORERS_USING_DISTANCE = [ scorer for scorer in MODEL_X_Y_SCORERS if scorer != "theil_index_scorer" ] MODEL_SCORERS_ALLOWING_Y_TRUE_NONE = ["model_statistical_parity_scorer"] DATASET_X_Y_SCORERS = { "dataset_statistical_parity_scorer": DatasetStatisticalParityScorer, "consistency_scorer": ConsistencyScorer, "smoothed_edf_scorer": SmoothedEDFScorer, } DATASET_SUBGROUPS_SCORERS = { "dataset_statistical_parity_scorer": dataset_statistical_parity, "consistency_scorer": consistency, "smoothed_edf_scorer": smoothed_edf, } DATASET_SCORERS_ALLOWING_REDUCTION = ["dataset_statistical_parity_scorer"] DATASET_SCORERS_USING_DISTANCE = ["dataset_statistical_parity_scorer"] ALL_X_Y_SCORERS = {**MODEL_X_Y_SCORERS, **DATASET_X_Y_SCORERS} SENSITIVE_FEATURES_VARIATIONS = { "one_attr_two_classes": {"n_classes": (2,)}, "one_attr_n_classes": {"n_classes": (4,)}, "n_attrs": {"n_classes": (3, 4)}, } class DummyBinaryStochasticModel: def predict(self, X): return np.random.randint(0, 2, size=X.shape[0]) def create_concat_sensitive_attrs(dataset, n_classes): if not isinstance(n_classes, list): n_classes = list(n_classes) sensitive_dataset = dataset.copy() sensitive_attrs_names = [] for i, n_classes_i in enumerate(n_classes): sensitive_vals = np.array( [f"sensitive_val_{idx}" for idx in range(n_classes_i)] ) attr_name = f"sensitive_attr_{i}" sensitive_dataset = concat_sensitive_attr_column( sensitive_vals, sensitive_dataset, attr_name ) sensitive_attrs_names.append(attr_name) return sensitive_dataset, sensitive_attrs_names def concat_sensitive_attr_column(vals, dataset, attr_name): sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.DataFrame(np.transpose(sensitive_vals), columns=[attr_name]) return pd.concat([dataset, sensitive_feats], axis=1) @pytest.fixture(scope="module") def model_type(): return "LogisticRegression" @pytest.fixture(scope="module") def base_dataset(): return get_dummy_dataset(n_samples=500, n_features=5, n_classes=2) @pytest.fixture( scope="module", params=SENSITIVE_FEATURES_VARIATIONS.values(), ids=SENSITIVE_FEATURES_VARIATIONS.keys(), ) def sensitive_dataset_and_model(model_type, 
base_dataset, request): dataset, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset, **request.param ) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", RandomForestClassifier()), ] ) model.fit(dataset, target) return dataset, target, model, sensitive_attr_names @pytest.mark.parametrize("scorer", DATASET_X_Y_SCORERS.keys()) def test_dataset_X_y_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) # Validate call signatures assert isinstance(scorer(X=dataset, y_true=target), float) assert isinstance(scorer(None, dataset, target), float) with pytest.raises(GuardianAIValueError): scorer(dataset, target) # Two ways to call metric are equivalent assert is_close(scorer(X=dataset, y_true=target), scorer(None, dataset, target)) @pytest.mark.parametrize("scorer", DATASET_SUBGROUPS_SCORERS.keys()) def test_dataset_subgroups_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate call signatures assert isinstance(scorer(target, subgroups), float) @pytest.mark.parametrize("scorer", DATASET_SUBGROUPS_SCORERS.keys()) def test_dataset_scorers_equivalence(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate same value assert is_close( subgroup_scorer(target, subgroups), X_y_scorer(X=dataset, y_true=target) ) @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_X_y_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) # Validate call signature assert isinstance(scorer(model, dataset, target), float) # Utility that can ignore some input columns. 
Useful for testing when a # column is moved from X to supp_features and wanting identical model # predictions class ModelIgnoreOtherFeatures(sklearn.base.BaseEstimator): def __init__(self, model, features_to_keep): self.model = model self.features_to_keep = features_to_keep def _trim_X(self, X): features_to_drop = [ col for col in X.columns if col not in self.features_to_keep ] return X.drop(columns=features_to_drop) def predict(self, X): return self.model.predict(self._trim_X(X)) def predict_proba(self, X): return self.model.predict_proba(self._trim_X(X)) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_supp_features(base_dataset, model_type, scorer): # Need to create our own dataset and model to test variations of supp_features dataset_no_sf, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset_no_sf, n_classes=(3, 4) ) if scorer in MODEL_X_Y_SCORERS: scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", RandomForestClassifier()), ] ) model.fit(dataset_no_sf, target) model = ModelIgnoreOtherFeatures(model, dataset_no_sf.columns) else: scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) model = None correct_score = scorer(model, dataset, target) # All sensitive features are in X (default) assert is_close(scorer(model, dataset, target), correct_score) # All sensitive features are in supplementary_features dataset_no_sf = dataset.drop(columns=sensitive_attr_names) all_supp_features = dataset[sensitive_attr_names] assert is_close( scorer(model, dataset_no_sf, target, supplementary_features=all_supp_features), correct_score, ) # Features are split across X and supplementary_features some_sf = [sensitive_attr_names[0]] dataset_some_sf = dataset.drop(columns=some_sf) supp_features = dataset[some_sf] assert is_close( scorer(model, dataset_some_sf, target, supplementary_features=supp_features), correct_score, ) # supplementary_features invalid type with pytest.raises(GuardianAIValueError): scorer(model, dataset, target, supplementary_features=[]) # Duplicate features between X and supplementary_features with pytest.raises(GuardianAIValueError): scorer(model, dataset, target, supplementary_features=all_supp_features) @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS.keys()) def test_model_subgroups_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) # Validate call signatures assert isinstance(scorer(target, y_pred, subgroups), float) @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS.keys()) def test_model_scorers_equivalence(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) # Validate same value assert is_close( subgroup_scorer(target, y_pred, subgroups), X_y_scorer(model, dataset, target) ) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_sensitive_attr_formats(base_dataset, scorer): dataset, target = base_dataset sensitive_attrs_name = "sensitive_attr" if scorer in MODEL_X_Y_SCORERS: scorer = 
MODEL_X_Y_SCORERS[scorer]([sensitive_attrs_name]) model = DummyBinaryStochasticModel() else: scorer = DATASET_X_Y_SCORERS[scorer]([sensitive_attrs_name]) model = None # Accept str vals vals = [f"val_{i}" for i in range(5)] sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) assert isinstance(scorer(model, sensitive_dataset, target), float) # Accept categorical vals vals = list(range(5)) sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.Series( np.transpose(sensitive_vals), dtype="category", name=sensitive_attrs_name ) sensitive_dataset = pd.concat([dataset, sensitive_feats], axis=1) assert isinstance(scorer(model, sensitive_dataset, target), float) # Accept bool vals vals = [True, False] sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) assert isinstance(scorer(model, sensitive_dataset, target), float) # Reject (non-categoricalized) integer vals vals = list(range(5)) sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) with pytest.raises(GuardianAIValueError): scorer(model, sensitive_dataset, target) # Reject float vals vals = np.random.rand(5) sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) with pytest.raises(GuardianAIValueError): scorer(model, sensitive_dataset, target) @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_metrics_y_true_None(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) if scorer in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE: # Can pass y_true=None assert isinstance(X_y_scorer(model, dataset, None), float) assert isinstance(subgroup_scorer(None, y_pred, subgroups), float) # Cannot pass only two arguments with pytest.raises(GuardianAIValueError): subgroup_scorer(y_pred, subgroups) else: with pytest.raises(GuardianAIValueError): X_y_scorer(model, dataset, None) with pytest.raises(GuardianAIValueError): subgroup_scorer(None, y_pred, subgroups) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_feature_not_in_dataset(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model if scorer in MODEL_X_Y_SCORERS: scorer_maker = MODEL_X_Y_SCORERS[scorer] else: scorer_maker = DATASET_X_Y_SCORERS[scorer] model = None # Correct output if features present scorer_obj = scorer_maker(sensitive_attr_names) assert isinstance(scorer_obj(model, dataset, target), float) # Error if one missing feature one_missing_feature = sensitive_attr_names + ["missing_feature"] scorer_obj = scorer_maker(one_missing_feature) with pytest.raises(GuardianAIValueError): scorer_obj(model, dataset, target) # Error if all missing features all_missing_features = [f"missing_feature_{i}" for i in range(3)] scorer_obj = scorer_maker(all_missing_features) with pytest.raises(GuardianAIValueError): scorer_obj(model, dataset, target) @pytest.mark.parametrize("scorer", MODEL_SCORERS_ALLOWING_REDUCTION) def test_model_scorer_reduction(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = MODEL_X_Y_SCORERS[scorer] subgroups_scorer = MODEL_SUBGROUPS_SCORERS[scorer] y_pred = model.predict(dataset) subgroups = 
dataset[sensitive_attr_names] # Mean reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="mean") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroups_scorer(target, y_pred, subgroups, reduction="mean"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroups_scorer(target, y_pred, subgroups, reduction="mean"), ) # Max reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="max") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroups_scorer(target, y_pred, subgroups, reduction="max"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroups_scorer(target, y_pred, subgroups, reduction="max"), ) # None reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction=None) X_y_result = X_y_scorer(model, dataset, target) assert isinstance(X_y_result, dict) subgroups_result = subgroups_scorer(target, y_pred, subgroups, reduction=None) assert isinstance(subgroups_result, dict) assert X_y_result == approx_dict(subgroups_result) # Other value with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="other") with pytest.raises(GuardianAIValueError): subgroups_scorer(target, y_pred, subgroups, reduction="other") @pytest.mark.parametrize("scorer", DATASET_SCORERS_ALLOWING_REDUCTION) def test_dataset_scorer_reduction(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroups_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Mean reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="mean") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance(subgroups_scorer(target, subgroups, reduction="mean"), float) assert is_close( X_y_scorer(None, dataset, target), subgroups_scorer(target, subgroups, reduction="mean"), ) # Max reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="max") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance(subgroups_scorer(target, subgroups, reduction="max"), float) assert is_close( X_y_scorer(None, dataset, target), subgroups_scorer(target, subgroups, reduction="max"), ) # None reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction=None) X_y_result = X_y_scorer(None, dataset, target) assert isinstance(X_y_result, dict) subgroups_result = subgroups_scorer(target, subgroups, reduction=None) assert isinstance(subgroups_result, dict) assert X_y_result == approx_dict(subgroups_result) # Other value with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="other") with pytest.raises(GuardianAIValueError): subgroups_scorer(target, subgroups, reduction="other") @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = MODEL_X_Y_SCORERS[scorer] subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) if scorer in MODEL_SCORERS_USING_DISTANCE: # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), float ) assert 
is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") # Do not accept None distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure=None) else: # Accepts only None as distance_measure X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure=None), float ) with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", DATASET_SCORERS_USING_DISTANCE) def test_dataset_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="something") with pytest.raises(GuardianAIValueError): subgroup_scorer(target, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS) def test_model_scorers_y_format(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model run_y_true_tests = scorer not in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Accept same number of instances y_pred = target.copy() assert isinstance(scorer(target, y_pred, subgroups), float) # Do not accept different number of instances if run_y_true_tests: y_pred = target[:-1].copy() with pytest.raises(GuardianAIValueError): scorer(target, y_pred, subgroups) # Do not accept multiclass 
classification multiclass_y = target.copy() multiclass_y[:3] = 2 # Change a few labels assert multiclass_y.nunique() == 3 # Sanity check with pytest.raises(GuardianAIValueError): scorer(multiclass_y, y_pred, subgroups) # Do not accept non-array inputs if run_y_true_tests: y_pred = target.copy() bad_target = {i: itm for i, itm in enumerate(target)}
with pytest.raises(GuardianAITypeError):
24
2023-10-09 09:48:50+00:00
24k
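The fairness-metric docstrings and the test file quoted in this record describe two equivalent call styles: a scorer object that takes a model and a feature frame containing the protected attributes, and a plain function that takes predictions plus a subgroups frame. The sketch below is a minimal, illustrative recombination of those pieces, not part of the record itself; it assumes guardian_ai and scikit-learn are installed and uses a small synthetic DataFrame with hypothetical race and sex columns standing in for protected attributes.

import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier

from guardian_ai.fairness.metrics.model import (
    ModelStatisticalParityScorer,
    model_statistical_parity,
)

# Synthetic data: one numeric feature plus two illustrative protected attributes.
rng = np.random.default_rng(0)
n = 200
X = pd.DataFrame(
    {
        "feat": rng.normal(size=n),
        "race": rng.choice(["A", "B"], size=n),
        "sex": rng.choice(["F", "M"], size=n),
    }
)
y = pd.Series(rng.integers(0, 2, size=n))

# Same model setup as the test fixture in the record: one-hot encode, then classify.
model = Pipeline(
    steps=[
        ("preprocessor", OneHotEncoder(handle_unknown="ignore")),
        ("classifier", RandomForestClassifier()),
    ]
)
model.fit(X, y)

# Scorer-object style: protected attributes are named; X carries their columns.
# y_true is optional for statistical parity, so scorer(model, X) is enough.
scorer = ModelStatisticalParityScorer(
    ["race", "sex"], distance_measure="diff", reduction="mean"
)
score_from_scorer = scorer(model, X)

# Functional style: pass the model's predictions and the subgroup columns directly.
subgroups = X[["race", "sex"]]
score_from_fn = model_statistical_parity(
    y_pred=model.predict(X),
    subgroups=subgroups,
    distance_measure="diff",
    reduction="mean",
)

# The test suite quoted above asserts that the two call styles agree.
assert np.isclose(score_from_scorer, score_from_fn)

Because the labels and features here are random, the disparity value itself is meaningless; the point is only the call-style equivalence that the quoted tests check with is_close, and the distance_measure/reduction options documented in the docstrings.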
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(Item(**info))\n return item_list" }, { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and 
os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have insufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsible JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratuations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "HumanBidder", "path": "src/human_bidder.py", "snippet": "class HumanBidder(Bidder):\n name: str\n human_name: str = \"Adam\"\n budget: int\n auction_hash: str\n \n cur_item_id = 0\n items: list = []\n withdraw: bool = False\n \n engagement_count: int = 0\n original_budget: int = 0\n profit: int = 0\n items_won = []\n \n all_bidders_status = {} # track others' profit\n \n # essential for demo\n need_input: bool = False\n semaphore: int = 0 # if needs input, then semaphore is set as 1, else waits.\n input_box: str = None # global variable for accepting user input\n \n # not used\n model_name: str = 'human'\n openai_cost = 0\n desire = ''\n plan_strategy = ''\n correct_belief = True\n \n class Config:\n arbitrary_types_allowed = True\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = \"As {bidder_name}, you have a total budget of ${budget}. 
This auction has a total of {item_num} items to be sequentially presented, they are:\\n{items_info}\".format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items)\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n # Human = auctioneer, AI = bidder\n self.dialogue_history += [\n HumanMessage(content=plan_instruct),\n AIMessage(content='(Getting ready...)')\n ]\n return ''\n \n def get_bid_instruct(self, auctioneer_msg, bid_round):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg), \n AIMessage(content='')\n ]\n return auctioneer_msg\n \n def bid(self, bid_instruct):\n # wait for the cue to handle user input\n while self.semaphore <= 0:\n time.sleep(1)\n \n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=self.input_box)\n ]\n self.semaphore -= 1\n self.need_input = False\n return self.input_box\n \n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct_summarize = f\"{bidding_history}\\n\\n{hammer_msg}\\n{win_lose_msg}\"\n return instruct_summarize\n \n def summarize(self, instruct_summarize: str):\n self.dialogue_history += [\n HumanMessage(content=instruct_summarize),\n AIMessage(content='(Taking notes...)')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n return ''\n \n def get_replan_instruct(self):\n return ''\n\n def replan(self, instruct_replan):\n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n def to_monitors(self, as_json=False):\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'human_name': self.human_name,\n 'model_name': self.model_name,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'engagement_count': self.engagement_count,\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n 0, \n 0, \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n 0, \n 0, \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n [],\n [],\n [], \n []\n ]" }, { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # history about the bidding war of one item\n items_queue: List[Item] = [] # updates when a item is taken.\n auction_logs = defaultdict(list) # history about the bidding war of all items\n openai_cost = 0\n prev_round_max_bid: int = -1\n min_bid: int = 0\n fail_to_sell = False\n min_markup_pct = 0.1\n\n class Config:\n arbitrary_types_allowed = True\n \n def init_items(self, items: List[Item]):\n for item in items:\n # reset discounted price\n item.reset_price()\n self.items = items\n self.items_queue = items.copy()\n\n def summarize_items_info(self):\n desc = ''\n for item in self.items:\n desc += f\"- {item.get_desc()}\\n\"\n return desc.strip()\n \n def present_item(self):\n cur_item = self.items_queue.pop(0)\n self.cur_item = cur_item\n return cur_item\n \n def shuffle_items(self):\n random.shuffle(self.items)\n self.items_queue = self.items.copy()\n \n def record_bid(self, bid_info: dict, bid_round: int):\n '''\n Save the 
bidding history for each round, log the highest bidder and highest bidding\n '''\n # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}\n self.bidding_history[bid_round].append(bid_info)\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n if self.highest_bid < hist['bid']:\n self.highest_bid = hist['bid']\n self.highest_bidder = hist['bidder']\n elif self.highest_bid == hist['bid']:\n # random if there's a tie\n self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append(\n {'bidder': bid_info['bidder'], \n 'bid': bid_info['bid'], \n 'bid_round': bid_round})\n\n def _biddings_to_string(self, bid_round: int):\n '''\n Return a string that summarizes the bidding history in a round\n '''\n # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\\n'\n bid_hist_text = ''\n for js in self.bidding_history[bid_round]:\n if js['bid'] < 0:\n bid_hist_text += f\"- {js['bidder']} withdrew\\n\"\n else:\n bid_hist_text += f\"- {js['bidder']}: ${js['bid']}\\n\"\n return bid_hist_text.strip()\n \n def all_bidding_history_to_string(self):\n bid_hist_text = ''\n for bid_round in self.bidding_history:\n bid_hist_text += f\"Round {bid_round}:\\n{self._biddings_to_string(bid_round)}\\n\\n\"\n return bid_hist_text.strip()\n\n def ask_for_bid(self, bid_round: int):\n '''\n Ask for bid, return the message to be sent to bidders\n '''\n if self.highest_bidder is None:\n if bid_round > 0:\n msg = f\"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?\"\n else:\n remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]\n msg = f\"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\\n\\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?\"\n else:\n bidding_history = self._biddings_to_string(bid_round - 1)\n msg = f\"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\\n{bidding_history}\\n\\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?\"\n return msg\n \n def ask_for_rebid(self, fail_msg: str, bid_price: int):\n return f\"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid.\"\n\n def get_hammer_msg(self):\n if self.highest_bidder is None:\n return f\"Since no one bid on {self.cur_item.name}, we'll move on to the next item.\"\n else:\n return f\"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! 
The true value for {self.cur_item} is ${self.cur_item.true_value}.\"# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}.\"\n\n def check_hammer(self, bid_round: int):\n # check if the item is sold\n self.fail_to_sell = False\n num_bid = self._num_bids_in_round(bid_round)\n\n # highest_bidder has already been updated in record_bid().\n # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item\n if self.highest_bidder is None:\n if num_bid == 0:\n # failed to sell, as there is no highest bidder\n self.fail_to_sell = True\n if self.enable_discount and bid_round < 3:\n # lower the starting price by 50%. discoutn only applies to the first 3 rounds\n self.cur_item.lower_price(0.5)\n is_sold = False\n else:\n is_sold = True\n else:\n # won't happen\n raise ValueError(f\"highest_bidder is None but num_bid is {num_bid}\")\n else:\n if self.prev_round_max_bid < 0 and num_bid == 1:\n # only one bidder in the first round \n is_sold = True\n else:\n self.prev_round_max_bid = self.highest_bid\n is_sold = self._num_bids_in_round(bid_round) == 0\n return is_sold\n \n def _num_bids_in_round(self, bid_round: int):\n # check if there is no bid in the current round\n cnt = 0\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n cnt += 1\n return cnt\n\n def hammer_fall(self):\n print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append({\n 'bidder': self.highest_bidder, \n 'bid': f\"{self.highest_bid} (${self.cur_item.true_value})\", # no need for the first $, as it will be added in the self.log()\n 'bid_round': 'Hammer price (true value)'})\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n\n def end_auction(self):\n return len(self.items_queue) == 0\n \n def gather_all_status(self, bidders: List[Bidder]):\n status = {}\n for bidder in bidders:\n status[bidder.name] = {\n 'profit': bidder.profit, \n 'items_won': bidder.items_won\n }\n return status\n\n def parse_bid(self, text: str):\n prompt = PARSE_BID_INSTRUCTION.format(response=text)\n with get_openai_callback() as cb:\n llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n result = llm([HumanMessage(content=prompt)]).content\n self.openai_cost += cb.total_cost\n \n bid_number = re.findall(r'\\$?\\d+', result.replace(',', ''))\n # find number in the result\n if '-1' in result:\n return -1\n elif len(bid_number) > 0:\n return int(bid_number[-1].replace('$', ''))\n else:\n print('* Rebid:', text)\n return None\n\n def log(self, bidder_personal_reports: list = [], show_model_name=True):\n ''' example\n Apparatus H, starting at $1000.\n\n 1st bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): $1200\n Bidder 2 (gpt-3.5-turbo-16k-0613): $1100\n Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n \n 2nd bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn\n \n Hammer price:\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n '''\n markdown_output = \"## Auction Log\\n\\n\"\n for i, (item, bids) in enumerate(self.auction_logs.items()):\n markdown_output += f\"### {i+1}. 
{item}\\n\\n\"\n cur_bid_round = -1\n for i, bid in enumerate(bids):\n if bid['bid_round'] != cur_bid_round:\n cur_bid_round = bid['bid_round']\n if isinstance(bid['bid_round'], int):\n markdown_output += f\"\\n#### {p.ordinal(bid['bid_round']+1)} bid:\\n\\n\"\n else:\n markdown_output += f\"\\n#### {bid['bid_round']}:\\n\\n\"\n bid_price = f\"${bid['bid']}\" if bid['bid'] != -1 else 'Withdrew'\n if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):\n if show_model_name:\n markdown_output += f\"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\\n\"\n else:\n markdown_output += f\"* {bid['bidder']}: {bid_price}\\n\"\n else:\n markdown_output += f\"* None bid\\n\"\n markdown_output += \"\\n\"\n \n if len(bidder_personal_reports) != 0:\n markdown_output += f\"\\n## Personal Report\"\n for report in bidder_personal_reports:\n markdown_output += f\"\\n\\n{report}\"\n return markdown_output.strip()\n \n def finish_auction(self):\n self.auction_logs = defaultdict(list)\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.items_queue = []\n self.items = []\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n self.min_bid = 0" }, { "identifier": "run_auction", "path": "auction_workflow.py", "snippet": "def run_auction(\n auction_hash: str, \n auctioneer: Auctioneer, \n bidder_list: List[Bidder], \n thread_num: int, \n yield_for_demo=True,\n log_dir=LOG_DIR,\n repeat_num=0,\n memo_file=None):\n \n # bidder_list[0].verbose=True\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n # ***************** Learn Round ****************\n for bidder in bidder_list:\n if bidder.enable_learning and memo_file:\n # if no prev memo file, then no need to learn.\n if os.path.exists(memo_file):\n with open(memo_file) as f:\n data = json.load(f)\n past_learnings = data['learnings'][bidder.name]\n past_auction_log = data['auction_log']\n bidder.learn_from_prev_auction(past_learnings, past_auction_log)\n \n # ***************** Plan Round *****************\n # init bidder profit\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n\n plan_instructs = [bidder.get_plan_instruct(auctioneer.items) for bidder in bidder_list]\n\n bidding_multithread(bidder_list, plan_instructs, func_type='plan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bar = tqdm(total=len(auctioneer.items_queue), desc='Auction Progress')\n while not auctioneer.end_auction():\n cur_item = auctioneer.present_item()\n \n bid_round = 0\n while True:\n # ***************** Bid Round ***************** \n auctioneer_msg = auctioneer.ask_for_bid(bid_round)\n _bidder_list = []\n _bid_instruct_list = []\n # remove highest bidder and withdrawn bidders\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder or bidder.withdraw:\n bidder.need_input = False\n continue\n else:\n bidder.need_input = True # enable input from demo\n instruct = bidder.get_bid_instruct(auctioneer_msg, bid_round)\n _bidder_list.append(bidder)\n _bid_instruct_list.append(instruct)\n \n if yield_for_demo:\n 
chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + enable_human_box(bidder_list)\n \n _msgs = bidding_multithread(_bidder_list, _bid_instruct_list, func_type='bid', thread_num=thread_num)\n\n for i, (msg, bidder) in enumerate(zip(_msgs, _bidder_list)):\n if bidder.model_name == 'rule':\n bid_price = bidder.bid_rule(auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n else:\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n\n # can't bid more than budget or less than previous highest bid\n while True:\n fail_msg = bidder.bid_sanity_check(bid_price, auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n if fail_msg is None: \n break\n else:\n bidder.need_input = True # enable input from demo\n auctioneer_msg = auctioneer.ask_for_rebid(fail_msg=fail_msg, bid_price=bid_price)\n rebid_instruct = bidder.get_rebid_instruct(auctioneer_msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n msg = bidder.rebid_for_failure(rebid_instruct)\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bidder.set_withdraw(bid_price)\n auctioneer.record_bid({'bidder': bidder, 'bid': bid_price, 'raw_msg': msg}, bid_round)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n is_sold = auctioneer.check_hammer(bid_round)\n bid_round += 1\n if is_sold: \n break\n else:\n if auctioneer.fail_to_sell and auctioneer.enable_discount:\n for bidder in bidder_list:\n bidder.set_withdraw(0) # back in the game\n\n # ***************** Summarize ***************** \n summarize_instruct_list = []\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder:\n win_lose_msg = bidder.win_bid(cur_item, auctioneer.highest_bid)\n else:\n win_lose_msg = bidder.lose_bid(cur_item)\n msg = bidder.get_summarize_instruct(\n bidding_history=auctioneer.all_bidding_history_to_string(),\n hammer_msg=auctioneer.get_hammer_msg(),\n win_lose_msg=win_lose_msg\n )\n summarize_instruct_list.append(msg)\n\n # record profit information of all bidders for each bidder\n # (not used in the auction, just for belief tracking evaluation)\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n \n bidding_multithread(bidder_list, summarize_instruct_list, func_type='summarize', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n # ***************** Replan *****************\n if len(auctioneer.items_queue) > 0: # no need to replan if all items are sold\n replan_instruct_list = [bidder.get_replan_instruct(\n # bidding_history=auctioneer.all_bidding_history_to_string(), \n # hammer_msg=auctioneer.get_hammer_msg()\n ) for bidder in bidder_list]\n bidding_multithread(bidder_list, replan_instruct_list, 
func_type='replan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n auctioneer.hammer_fall()\n bar.update(1)\n\n total_cost = sum([b.openai_cost for b in bidder_list]) + auctioneer.openai_cost\n bidder_reports = [bidder.profit_report() for bidder in bidder_list]\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list, profit_report=True)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log(bidder_reports) + f'\\n## Total Cost: ${total_cost}'] + [disable_gr, enable_gr] + disable_all_box(bidder_list)\n \n memo = {'auction_log': auctioneer.log(show_model_name=False),\n 'memo_text': bidder_reports,\n 'profit': {bidder.name: bidder.profit for bidder in bidder_list},\n 'total_cost': total_cost,\n 'learnings': {bidder.name: bidder.learnings for bidder in bidder_list},\n 'model_info': {bidder.name: bidder.model_name for bidder in bidder_list}}\n log_bidders(log_dir, auction_hash, bidder_list, repeat_num, memo)\n \n auctioneer.finish_auction()\n \n if not yield_for_demo:\n yield total_cost" }, { "identifier": "make_auction_hash", "path": "auction_workflow.py", "snippet": "def make_auction_hash():\n return str(int(time.time()))" }, { "identifier": "chunks", "path": "utils.py", "snippet": "def chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]" }, { "identifier": "reset_state_list", "path": "utils.py", "snippet": "def reset_state_list(*states):\n empty = [None for _ in states[1:]]\n return [[]] + empty" } ]
import os
import gradio as gr
from app_modules.presets import *
from app_modules.overwrites import *
from app_modules.utils import *
from src.item_base import create_items
from src.bidder_base import Bidder
from src.human_bidder import HumanBidder
from src.auctioneer_base import Auctioneer
from auction_workflow import run_auction, make_auction_hash
from utils import chunks, reset_state_list
15,763
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')


def auction_loop_app(*args):
    global items
    bidder_list = args[0]   # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
    for i, chunk in enumerate(chunks(args, len(input_keys))):
        js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash}
        for k, v in zip(input_keys, chunk):
            js[k] = v
        input_jsl.append(js)

    for js in input_jsl:
        js.pop('chatbot')
        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
            bidder_list.append(Bidder.create(**js))
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')


def auction_loop_app(*args):
    global items
    bidder_list = args[0]   # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
    for i, chunk in enumerate(chunks(args, len(input_keys))):
        js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash}
        for k, v in zip(input_keys, chunk):
            js[k] = v
        input_jsl.append(js)

    for js in input_jsl:
        js.pop('chatbot')
        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
            bidder_list.append(Bidder.create(**js))
yield from run_auction(auction_hash, auctioneer, bidder_list, thread_num, yield_for_demo=True)
4
2023-10-08 09:30:57+00:00
24k
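
A record like the one above bundles everything needed to pose a next-line completion task: the retrieved cross-file context snippets, the file's imports, the in-file code so far, and the gold next_line. As a rough illustration of how such a record might be consumed, the short sketch below assembles a prompt from those fields and scores a prediction by exact match. It is a minimal sketch under the assumption that a record is available as a Python dict keyed by the field names shown above (context, import_statement, cropped_code, next_line); the build_prompt / exact_match helpers and the prompt layout are illustrative, not part of the dataset.

def build_prompt(record: dict) -> str:
    """Assemble a completion prompt from one record (illustrative layout)."""
    # Each context entry is a dict with 'identifier', 'path', and 'snippet' keys,
    # as in the records above; snippets are prepended as reference material.
    blocks = [f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in record["context"]]
    retrieved = "\n\n".join(blocks)
    return f"{retrieved}\n\n{record['import_statement']}\n{record['cropped_code']}\n"

def exact_match(predicted_line: str, record: dict) -> bool:
    """Compare a model's predicted next line against the gold next_line."""
    return predicted_line.strip() == record["next_line"].strip()

# Usage (model_generate stands in for any code-completion model call):
#   prompt = build_prompt(record)
#   hit = exact_match(model_generate(prompt), record)
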
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n 
# auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
import_statement:
import os
import random
import torchaudio
import typing as tp
import numpy as np
import torch
import subprocess
from typing import Optional
from cog import BasePredictor, Input, Path
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models import MusicGen, MultiBandDiffusion
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models.loaders import (
    load_compression_model,
    load_lm_model,
)
from audiocraft.data.audio import audio_write
from audiocraft.models.builders import get_lm_model
from omegaconf import OmegaConf
token_num: 17,697
cropped_code:
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.
MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports


def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)


def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
all_code:
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.
MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports


def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)


def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
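The next_line above continues load_ckpt by rebuilding the compression model from the checkpoint's config. A hedged sketch of how the restored language model and compression model are then typically assembled follows; the MusicGen constructor call and the wrapper name are assumptions for illustration, not code taken from this repository.

from audiocraft.models import MusicGen
from audiocraft.solvers.compression import CompressionSolver

def finish_load_ckpt(lm, cfg, device):
    # next_line from the record: rebuild the EnCodec-style compression model
    compression_model = CompressionSolver.model_from_checkpoint(
        cfg.compression_model_checkpoint, device=device
    )
    # Assumed continuation: recent audiocraft releases construct the wrapper as
    # MusicGen(name, compression_model, lm); verify against the repository itself.
    return MusicGen("loaded-from-ckpt", compression_model, lm)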
3
2023-10-09 09:52:24+00:00
24k
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ControlNetPseudo3DModel", "path": "video_diffusion/models/controlnet_3d_condition.py", "snippet": "class ControlNetPseudo3DModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n **kwargs\n ):\n super().__init__()\n\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n # self.conv_in = PseudoConv3d(\n # in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n # )\n self.conv_in = InflatedConv3d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetPseudo3DConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n #non temperal \n # kwargs_copy=copy.deepcopy(kwargs)\n # temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n # and (not is_final_block))\n # kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n 
cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n # model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n # controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n # model_config=kwargs\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetPseudo3DOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n \n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb)\n\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n # print(sample.shape,controlnet_cond.shape)\n sample += controlnet_cond\n \n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n\n # 5. 
Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetPseudo3DOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, control_temporal_idx=None, control_mid_temporal=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\"\n ]\n # config[\"control_temporal_idx\"] = control_temporal_idx\n # config[\"control_mid_temporal\"] = control_mid_temporal\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n\n state_dict = torch.load(model_file, map_location=\"cpu\")\n for k, v in model.state_dict().items():\n if '_temp.' 
in k:\n if 'conv' in k:\n state_dict.update({k: v})\n else:\n copyk = k\n copyk = copyk.replace('_temp.', '1.')\n state_dict.update({k: state_dict[copyk]})\n model.load_state_dict(state_dict)\n\n return model\n\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n \n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ImageSequenceDataset", "path": "video_diffusion/data/dataset.py", "snippet": "class ImageSequenceDataset(Dataset):\n def __init__(\n self,\n path: str,\n prompt_ids: torch.Tensor,\n prompt: str,\n start_sample_frame: int=0,\n n_sample_frame: int = 8,\n sampling_rate: int = 1,\n stride: int = -1, # only used during tuning to sample a long video\n image_mode: str = \"RGB\",\n image_size: int = 512,\n crop: str = \"center\",\n \n class_data_root: str = None,\n class_prompt_ids: torch.Tensor = None,\n \n offset: dict = {\n \"left\": 0,\n \"right\": 0,\n \"top\": 0,\n \"bottom\": 0\n },\n **args\n \n ):\n self.path = path\n self.images = self.get_image_list(path)\n self.n_images = len(self.images)\n self.offset = offset\n self.start_sample_frame = start_sample_frame\n if n_sample_frame < 0:\n n_sample_frame = len(self.images) \n self.n_sample_frame = n_sample_frame\n # local sampling rate from the video\n self.sampling_rate = sampling_rate\n\n self.sequence_length = (n_sample_frame - 1) * sampling_rate + 1\n if self.n_images < self.sequence_length:\n raise ValueError(f\"self.n_images {self.n_images } < self.sequence_length {self.sequence_length}: Required number of frames {self.sequence_length} larger than total frames in the dataset {self.n_images }\")\n \n # During tuning if video is too long, we sample the long video every self.stride globally\n self.stride = stride if stride > 0 else (self.n_images+1)\n self.video_len = (self.n_images - self.sequence_length) // self.stride + 1\n\n self.image_mode = image_mode\n self.image_size 
= image_size\n crop_methods = {\n \"center\": center_crop,\n \"random\": random_crop,\n }\n if crop not in crop_methods:\n raise ValueError\n self.crop = crop_methods[crop]\n\n self.prompt = prompt\n self.prompt_ids = prompt_ids\n # Negative prompt for regularization to avoid overfitting during one-shot tuning\n if class_data_root is not None:\n self.class_data_root = Path(class_data_root)\n self.class_images_path = sorted(list(self.class_data_root.iterdir()))\n self.num_class_images = len(self.class_images_path)\n self.class_prompt_ids = class_prompt_ids\n \n \n def __len__(self):\n max_len = (self.n_images - self.sequence_length) // self.stride + 1\n \n if hasattr(self, 'num_class_images'):\n max_len = max(max_len, self.num_class_images)\n \n return max_len\n\n def __getitem__(self, index):\n return_batch = {}\n frame_indices = self.get_frame_indices(index%self.video_len)\n frames = [self.load_frame(i) for i in frame_indices]\n frames = self.transform(frames)\n\n return_batch.update(\n {\n \"images\": frames,\n \"prompt_ids\": self.prompt_ids,\n }\n )\n\n if hasattr(self, 'class_data_root'):\n class_index = index % (self.num_class_images - self.n_sample_frame)\n class_indices = self.get_class_indices(class_index) \n frames = [self.load_class_frame(i) for i in class_indices]\n return_batch[\"class_images\"] = self.tensorize_frames(frames)\n return_batch[\"class_prompt_ids\"] = self.class_prompt_ids\n return return_batch\n \n def transform(self, frames):\n frames = self.tensorize_frames(frames)\n frames = offset_crop(frames, **self.offset)\n frames = short_size_scale(frames, size=self.image_size)\n frames = self.crop(frames, height=self.image_size, width=self.image_size)\n return frames\n\n @staticmethod\n def tensorize_frames(frames):\n frames = rearrange(np.stack(frames), \"f h w c -> c f h w\")\n return torch.from_numpy(frames).div(255) * 2 - 1\n\n def load_frame(self, index):\n image_path = os.path.join(self.path, self.images[index])\n return Image.open(image_path).convert(self.image_mode)\n\n def load_class_frame(self, index):\n image_path = self.class_images_path[index]\n return Image.open(image_path).convert(self.image_mode)\n\n def get_frame_indices(self, index):\n if self.start_sample_frame is not None:\n frame_start = self.start_sample_frame + self.stride * index\n else:\n frame_start = self.stride * index\n return (frame_start + i * self.sampling_rate for i in range(self.n_sample_frame))\n\n def get_class_indices(self, index):\n frame_start = index\n return (frame_start + i for i in range(self.n_sample_frame))\n\n @staticmethod\n def get_image_list(path):\n images = []\n for file in sorted(os.listdir(path)):\n if file.endswith(IMAGE_EXTENSION):\n images.append(file)\n return images" }, { "identifier": "get_time_string", "path": "video_diffusion/common/util.py", "snippet": "def get_time_string() -> str:\n x = datetime.datetime.now()\n return f\"{(x.year - 2000):02d}{x.month:02d}{x.day:02d}-{x.hour:02d}{x.minute:02d}{x.second:02d}\"" }, { "identifier": "get_function_args", "path": "video_diffusion/common/util.py", "snippet": "def get_function_args() -> Dict:\n frame = sys._getframe(1)\n args, _, _, values = inspect.getargvalues(frame)\n args_dict = copy.deepcopy({arg: values[arg] for arg in args})\n\n return args_dict" }, { "identifier": "get_logger_config_path", "path": "video_diffusion/common/logger.py", "snippet": "def get_logger_config_path(logdir):\n # accelerate handles the logger in multiprocessing\n logger = get_logger(__name__)\n logging.basicConfig(\n level=logging.INFO, 
\n format='%(asctime)s:%(levelname)s : %(message)s', \n datefmt='%a, %d %b %Y %H:%M:%S', \n filename=os.path.join(logdir, 'log.log'),\n filemode='w')\n chlr = logging.StreamHandler()\n chlr.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s : %(message)s'))\n logger.logger.addHandler(chlr)\n return logger" }, { "identifier": "log_train_samples", "path": "video_diffusion/common/image_util.py", "snippet": "def log_train_samples(\n train_dataloader,\n save_path,\n num_batch: int = 4,\n):\n train_samples = []\n for idx, batch in enumerate(train_dataloader):\n if idx >= num_batch:\n break\n train_samples.append(batch[\"images\"])\n\n train_samples = torch.cat(train_samples).numpy()\n train_samples = rearrange(train_samples, \"b c f h w -> b f h w c\")\n train_samples = (train_samples * 0.5 + 0.5).clip(0, 1)\n train_samples = numpy_batch_seq_to_pil(train_samples)\n train_samples = [make_grid(images, cols=int(np.ceil(np.sqrt(len(train_samples))))) for images in zip(*train_samples)]\n # save_images_as_gif(train_samples, save_path)\n save_gif_mp4_folder_type(train_samples, save_path)" }, { "identifier": "instantiate_from_config", "path": "video_diffusion/common/instantiate_from_config.py", "snippet": "def instantiate_from_config(config:dict, **args_from_code):\n \"\"\"Util funciton to decompose differenct modules using config\n\n Args:\n config (dict): with key of \"target\" and \"params\", better from yaml\n static \n args_from_code: additional con\n\n\n Returns:\n a validation/training pipeline, a module\n \"\"\"\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()), **args_from_code)" }, { "identifier": "P2pSampleLogger", "path": "video_diffusion/pipelines/p2p_validation_loop_controlnet.py", "snippet": "class P2pSampleLogger:\n def __init__(\n self,\n editing_prompts: List[str],\n clip_length: int,\n logdir: str,\n subdir: str = \"sample\",\n num_samples_per_prompt: int = 1,\n sample_seeds: List[int] = None,\n num_inference_steps: int = 20,\n guidance_scale: float = 7,\n strength: float = None,\n annotate: bool = False,\n annotate_size: int = 15,\n use_make_grid: bool = True,\n grid_column_size: int = 2,\n prompt2prompt_edit: bool=False,\n p2p_config: dict = None,\n use_inversion_attention: bool = True,\n source_prompt: str = None,\n traverse_p2p_config: bool = False,\n **args\n ) -> None:\n self.editing_prompts = editing_prompts\n self.clip_length = clip_length\n self.guidance_scale = guidance_scale\n self.num_inference_steps = num_inference_steps\n self.strength = strength\n\n if sample_seeds is None:\n max_num_samples_per_prompt = int(1e5)\n if num_samples_per_prompt > max_num_samples_per_prompt:\n raise ValueError\n sample_seeds = torch.randint(0, max_num_samples_per_prompt, (num_samples_per_prompt,))\n sample_seeds = sorted(sample_seeds.numpy().tolist())\n self.sample_seeds = sample_seeds\n\n self.logdir = os.path.join(logdir, subdir)\n os.makedirs(self.logdir)\n\n self.annotate = annotate\n self.annotate_size = annotate_size\n self.make_grid = use_make_grid\n self.grid_column_size = grid_column_size\n self.prompt2prompt_edit = prompt2prompt_edit\n self.p2p_config = p2p_config\n self.use_inversion_attention = use_inversion_attention\n self.source_prompt = source_prompt\n self.traverse_p2p_config =traverse_p2p_config\n\n def log_sample_images(\n self, pipeline: 
DiffusionPipeline,\n device: torch.device, step: int,\n image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n control_image: torch.FloatTensor = None,\n latents: torch.FloatTensor = None,\n mask:torch.FloatTensor = None,\n editing_type:str = \"attribute\",\n uncond_embeddings_list: List[torch.FloatTensor] = None,\n save_dir = None,\n duration = 100,\n fps = 10,\n use_interpolater = True\n ):\n torch.cuda.empty_cache()\n samples_all = []\n attention_all = []\n # handle input image\n if image is not None:\n input_pil_images = pipeline.numpy_to_pil(tensor_to_numpy(image))[0]\n if self.annotate :\n samples_all.append([\n annotate_image(image, \"input sequence\", font_size=self.annotate_size) for image in input_pil_images\n ])\n else:\n samples_all.append(input_pil_images)\n if isinstance(self.editing_prompts,str):\n self.editing_prompts = [self.editing_prompts]\n for idx, prompt in enumerate(tqdm(self.editing_prompts, desc=\"Generating sample images\")):\n # if self.prompt2prompt_edit:\n # if self.traverse_p2p_config:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n # else:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n\n # if idx == 0 and not self.use_inversion_attention:\n # edit_type = 'save'\n # p2p_config_now.update({'save_self_attention': True})\n # print('Reflash the attention map in pipeline')\n\n # else:\n # edit_type = 'swap'\n # p2p_config_now.update({'save_self_attention': False})\n\n # p2p_config_now.update({'use_inversion_attention': self.use_inversion_attention})\n # else:\n # edit_type = None\n\n input_prompt = prompt\n\n # generator = torch.Generator(device=device)\n # generator.manual_seed(seed)\n generator = None\n sequence = []\n window = 8\n window = min(window,self.clip_length)\n start_frame = 0\n end_frame = window\n patch_index = 0\n while start_frame < self.clip_length:\n torch.cuda.empty_cache()\n if patch_index == 0:\n sequence_return = pipeline(\n prompt=input_prompt,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = 1,\n num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + [0] + list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n else:\n sequence_return = pipeline(\n prompt=input_prompt,\n reference_global_latents = reference_global_latents,\n reference_latents = reference_latents,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + list(range(start_frame - 1,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = window,\n 
num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + list(range(start_frame-1,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + list(range(start_frame-1, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n start_frame = end_frame\n end_frame = end_frame + window\n if patch_index == 0:\n reference_global_latents = sequence_return['reference_global_latents']\n reference_latents = sequence_return['reference_latents']\n patch_index = patch_index + 1\n # if self.prompt2prompt_edit:\n # sequence_temp = sequence_return['sdimage_output'].images[0]\n # # attention_output = sequence_return['attention_output']\n # else:\n # sequence_temp = sequence_return.images[0]\n sequence_temp = sequence_return['sdimage_output'].images[0]\n sequence = sequence + sequence_temp\n torch.cuda.empty_cache()\n # sequence = torch.cat(sequence,dim = 2)\n\n if self.annotate:\n images = [\n annotate_image(image, prompt, font_size=self.annotate_size) for image in sequence\n ]\n else:\n images = sequence\n control_images = []\n for i in range(control_image.shape[2]):\n control_images.append(Image.fromarray((control_image[0,:,i]*255).cpu().numpy().transpose(1,2,0).astype(np.uint8)))\n #smoother start\n if use_interpolater:\n for i in range(len(images)):\n images[i] = np.array(images[i]).transpose(2,0,1)[None:]/255\n frames = torch.from_numpy(np.stack(images, axis= 0)).cuda()\n f, C, H, W = frames.shape\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n frames = F.pad(frames,padding)\n smoother = Model()\n smoother.load_model('RIFEModel', -1)\n print('using smoother')\n with torch.no_grad():\n for i in range(f - 2):\n img0 = frames[i:i+1].float()\n img1 = frames[i+2:i+3].float()\n mid = smoother.inference(img0,img1)\n mid_padded = F.pad(mid,padding)\n frames[i+1:i+2,] = (frames[i+1:i+2,] + mid_padded[None:])/2\n torch.cuda.empty_cache()\n images = []\n for i in range(len(frames)):\n images.append(Image.fromarray((frames[i] * 255).cpu().numpy().astype(np.uint8).transpose(1,2,0)))\n # smoother end\n if self.make_grid:\n samples_all.append(control_images)\n samples_all.append(images)\n # if self.prompt2prompt_edit:\n # if attention_output is not None:\n # attention_all.append(attention_output)\n\n save_path = os.path.join(self.logdir, f\"step_{step}_{idx}.gif\")\n save_gif_mp4_folder_type(images, save_path,duration = duration,fps = fps)\n\n # if self.prompt2prompt_edit:\n\n # if attention_output is not None:\n # save_gif_mp4_folder_type(attention_output, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n\n if self.make_grid:\n samples_all = [make_grid(images, cols=int(len(samples_all))) for images in zip(*samples_all)]\n save_path = os.path.join(self.logdir, f\"step_{step}.gif\")\n save_gif_mp4_folder_type(samples_all, save_path,duration = duration,fps = fps)\n if self.prompt2prompt_edit:\n if len(attention_all) > 0 :\n attention_all = [make_grid(images, cols=1) for images in zip(*attention_all)]\n if 
len(attention_all) > 0:\n save_gif_mp4_folder_type(attention_all, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n return samples_all" }, { "identifier": "get_control", "path": "annotator/util.py", "snippet": "def get_control(type):\n if type == 'canny':\n from .canny import CannyDetector\n apply_control = CannyDetector()\n elif type == 'openpose':\n from .openpose import OpenposeDetector\n apply_control = OpenposeDetector()\n elif type == 'depth' or type == 'normal':\n from .midas import MidasDetector\n apply_control = MidasDetector()\n elif type == 'hed':\n from .hed import HEDdetector\n apply_control = HEDdetector()\n elif type == 'scribble':\n apply_control = None\n elif type == 'seg':\n from .uniformer import UniformerDetector\n apply_control = UniformerDetector()\n elif type == 'mlsd':\n from .mlsd import MLSDdetector\n apply_control = MLSDdetector()\n else:\n raise TypeError(type)\n return apply_control" }, { "identifier": "DDIMInterpolationScheduler", "path": "video_diffusion/pipelines/DDIMInterpolationScheduler.py", "snippet": "class DDIMInterpolationScheduler(DDIMScheduler):\n \"\"\"\n Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising\n diffusion probabilistic models (DDPMs) with non-Markovian guidance.\n\n [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`\n function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.\n [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and\n [`~SchedulerMixin.from_pretrained`] functions.\n\n For more details, see the original paper: https://arxiv.org/abs/2010.02502\n\n Args:\n num_train_timesteps (`int`): number of diffusion steps used to train the model.\n beta_start (`float`): the starting `beta` value of inference.\n beta_end (`float`): the final `beta` value.\n beta_schedule (`str`):\n the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, optional):\n option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.\n clip_sample (`bool`, default `True`):\n option to clip predicted sample between -1 and 1 for numerical stability.\n set_alpha_to_one (`bool`, default `True`):\n each diffusion step uses the value of alphas product at that step and at the previous one. For the final\n step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the value of alpha at step 0.\n steps_offset (`int`, default `0`):\n an offset added to the inference steps. 
You can use a combination of `offset=1` and\n `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in\n stable diffusion.\n prediction_type (`str`, default `epsilon`, optional):\n prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion\n process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4\n https://imagen.research.google/video/paper.pdf)\n \"\"\"\n\n _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()\n _deprecated_kwargs = [\"predict_epsilon\"]\n order = 1\n\n def set_model(self,vae,interpolater):\n self.interpolater = interpolater\n self.vae = vae\n \n \n def decode_latents(self, latents):\n is_video = (latents.dim() == 5)\n b = latents.shape[0]\n latents = 1 / 0.18215 * latents\n \n if is_video:\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\") # torch.Size([70, 4, 64, 64])\n\n latents_split = torch.split(latents, 16, dim=0)\n image = torch.cat([self.vae.decode(l).sample for l in latents_split], dim=0)\n \n # image_full = self.vae.decode(latents).sample\n # RuntimeError: upsample_nearest_nhwc only supports output tensors with less than INT_MAX elements\n # Pytorch upsample alogrithm not work for batch size 32 -> 64 \n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n\n # image = image.cpu().float().numpy()\n # if is_video:\n # image = rearrange(image, \"(b f) c h w -> b f h w c\", b=b)\n # else:\n # image = rearrange(image, \"b c h w -> b h w c\", b=b)\n return image\n def encode_latents(self,images,generator = None):\n if len(images.shape) == 4:\n images = images[None:]\n images = ((images - 0.5) * 2 ) \n latents = self.vae.encode(images).latent_dist.sample(generator)\n latents = latents * 0.18215\n return latents\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[DDIMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`): direct output from learned diffusion model.\n timestep (`int`): current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n current instance of sample being created by diffusion process.\n eta (`float`): weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`): if `True`, compute \"corrected\" `model_output` from the clipped\n predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when\n `self.config.clip_sample` is `True`. If no clipping has happened, \"corrected\" `model_output` would\n coincide with the one provided as input and `use_clipped_model_output` will have not effect.\n generator: random number generator.\n variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we\n can directly provide the noise for the variance itself. This is useful for methods such as\n CycleDiffusion. 
(https://arxiv.org/abs/2210.05559)\n return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class\n\n Returns:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation (<variable name> -> <name in paper>\n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_sample -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_sample_direction -> \"direction pointing to x_t\"\n # - pred_prev_sample -> \"x_t-1\"\n\n # 1. get previous step value (=t-1)\n prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n\n # 3. compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n if self.config.prediction_type == \"epsilon\":\n pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n elif self.config.prediction_type == \"sample\":\n pred_original_sample = model_output\n elif self.config.prediction_type == \"v_prediction\":\n pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n # predict V\n model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample\n else:\n raise ValueError(\n f\"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or\"\n \" `v_prediction`\"\n )\n\n # # add a interpolater\n images = self.decode_latents(pred_original_sample)\n\n f , C, H, W = images.shape\n # images = torch.from_numpy(images).cuda()\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n images= F.pad(images,padding).float()\n for i in range(1,f-2):\n img0 = images[i:i+1]\n img1 = images[i+2:i+3] \n inference_img = self.interpolater.inference(img0,img1)\n images[i+1:i+2] = inference_img\n pred_original_sample = self.encode_latents(images.to(self.vae.dtype),generator)\n pred_original_sample = rearrange(pred_original_sample[None], 'b f c h w -> b c f h w') \n\n \n # 4. Clip \"predicted x_0\"\n if self.config.clip_sample:\n pred_original_sample = torch.clamp(pred_original_sample, -1, 1)\n\n # 5. compute variance: \"sigma_t(η)\" -> see formula (16)\n # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)\n variance = self._get_variance(timestep, prev_timestep)\n std_dev_t = eta * variance ** (0.5)\n\n if use_clipped_model_output:\n # the model_output is always re-derived from the clipped x_0 in Glide\n model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)\n\n # 6. compute \"direction pointing to x_t\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output\n\n # 7. 
compute x_t without \"random noise\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction\n\n if eta > 0:\n # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072\n device = model_output.device\n if variance_noise is not None and generator is not None:\n raise ValueError(\n \"Cannot pass both generator and variance_noise. Please make sure that either `generator` or\"\n \" `variance_noise` stays `None`.\"\n )\n\n if variance_noise is None:\n if device.type == \"mps\":\n # randn does not work reproducibly on mps\n variance_noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator)\n variance_noise = variance_noise.to(device)\n else:\n variance_noise = torch.randn(\n model_output.shape, generator=generator, device=device, dtype=model_output.dtype\n )\n variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * variance_noise\n\n prev_sample = prev_sample + variance\n\n if not return_dict:\n return (prev_sample,)\n\n return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)" }, { "identifier": "Model", "path": "RIFEModel/RIFE_HDv3.py", "snippet": "class Model:\n def __init__(self, local_rank=-1):\n self.flownet = IFNet()\n self.device()\n self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4)\n self.epe = EPE()\n # self.vgg = VGGPerceptualLoss().to(device)\n self.sobel = SOBEL()\n if local_rank != -1:\n self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank)\n\n def train(self):\n self.flownet.train()\n\n def eval(self):\n self.flownet.eval()\n\n def device(self):\n self.flownet.to(device)\n\n def load_model(self, path, rank=0):\n def convert(param):\n if rank == -1:\n return {\n k.replace(\"module.\", \"\"): v\n for k, v in param.items()\n if \"module.\" in k\n }\n else:\n return param\n if rank <= 0:\n if torch.cuda.is_available():\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path))))\n else:\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path), map_location ='cpu')))\n \n def save_model(self, path, rank=0):\n if rank == 0:\n torch.save(self.flownet.state_dict(),'{}/flownet.pkl'.format(path))\n\n def inference(self, img0, img1, scale=1.0):\n imgs = torch.cat((img0, img1), 1)\n scale_list = [4/scale, 2/scale, 1/scale]\n flow, mask, merged = self.flownet(imgs, scale_list)\n return merged[2]\n \n def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):\n for param_group in self.optimG.param_groups:\n param_group['lr'] = learning_rate\n img0 = imgs[:, :3]\n img1 = imgs[:, 3:]\n if training:\n self.train()\n else:\n self.eval()\n scale = [4, 2, 1]\n flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale=scale, training=training)\n loss_l1 = (merged[2] - gt).abs().mean()\n loss_smooth = self.sobel(flow[2], flow[2]*0).mean()\n # loss_vgg = self.vgg(merged[2], gt)\n if training:\n self.optimG.zero_grad()\n loss_G = loss_cons + loss_smooth * 0.1\n loss_G.backward()\n self.optimG.step()\n else:\n flow_teacher = flow[2]\n return merged[2], {\n 'mask': mask,\n 'flow': flow[2][:, :2],\n 'loss_l1': loss_l1,\n 'loss_cons': loss_cons,\n 'loss_smooth': loss_smooth,\n }" } ]
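The DDIMInterpolationScheduler snippet above walks through formula (12) of the DDIM paper inside step(): first recover a predicted x_0 from the noise estimate, then combine it with a "direction pointing to x_t" term to get the previous sample. The following is only a minimal sketch of that update for the epsilon-prediction case with eta = 0 (no added noise); it illustrates the formula and is not the repository's actual implementation.

import torch

def ddim_step_eta0(model_output: torch.Tensor,
                   sample: torch.Tensor,
                   alpha_prod_t: float,
                   alpha_prod_t_prev: float) -> torch.Tensor:
    # "predicted x_0" of formula (12): x0 = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)
    pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # "direction pointing to x_t"; sigma_t = 0 because eta = 0
    pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
    # x_{t-1} = sqrt(a_{t-1}) * x0 + direction
    return alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction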
import os
import copy
import click
import re
import numpy as np
import torch
import torch.utils.data
import torch.utils.checkpoint
import decord
import shutil
from glob import glob
from typing import Optional, Dict
from tqdm.auto import tqdm
from omegaconf import OmegaConf
from PIL import Image
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
)
from diffusers.utils.import_utils import is_xformers_available
from transformers import AutoTokenizer, CLIPTextModel
from einops import rearrange
from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel
from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel
from video_diffusion.data.dataset import ImageSequenceDataset
from video_diffusion.common.util import get_time_string, get_function_args
from video_diffusion.common.logger import get_logger_config_path
from video_diffusion.common.image_util import log_train_samples
from video_diffusion.common.instantiate_from_config import instantiate_from_config
from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger
from annotator.util import get_control
from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler
from RIFEModel.RIFE_HDv3 import Model
19,167
decord.bridge.set_bridge('torch')
# from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger
# logger = get_logger(__name__)


def collate_fn(examples):
    """Concat a batch of sampled image in dataloader
    """
    batch = {
        "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0),
        "images": torch.stack([example["images"] for example in examples]),
    }
    return batch


def test(
    config: str,
    pretrained_model_path: str,
    control_type: str,
    pretrained_controlnet_model_path: str,
    dataset_config: Dict,
    logdir: str = None,
    editing_config: Optional[Dict] = None,
    test_pipeline_config: Optional[Dict] = None,
    gradient_accumulation_steps: int = 1,
    seed: Optional[int] = None,
    mixed_precision: Optional[str] = "fp16",
    batch_size: int = 1,
    model_config: dict = {},
    verbose: bool = True,
    **kwargs
):
decord.bridge.set_bridge('torch')
# from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger
# logger = get_logger(__name__)


def collate_fn(examples):
    """Concat a batch of sampled image in dataloader
    """
    batch = {
        "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0),
        "images": torch.stack([example["images"] for example in examples]),
    }
    return batch


def test(
    config: str,
    pretrained_model_path: str,
    control_type: str,
    pretrained_controlnet_model_path: str,
    dataset_config: Dict,
    logdir: str = None,
    editing_config: Optional[Dict] = None,
    test_pipeline_config: Optional[Dict] = None,
    gradient_accumulation_steps: int = 1,
    seed: Optional[int] = None,
    mixed_precision: Optional[str] = "fp16",
    batch_size: int = 1,
    model_config: dict = {},
    verbose: bool = True,
    **kwargs
):
args = get_function_args()
4
2023-10-09 14:38:28+00:00
24k
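Taken together, the values above form one record of this dump: the retrieved context snippets, the file's import block, a code prefix that is cut off right before a target line, what appears to be the line that follows it in the source file (`args = get_function_args()`), and a few scalar fields. As a hedged illustration only — the function and argument names below are hypothetical placeholders, not part of the dataset — a minimal exact-match check of a predicted next line could look like this:

def exact_match(predicted_line: str, gold_next_line: str) -> bool:
    # Compare a model's predicted continuation of the code prefix with the
    # gold next line, ignoring surrounding whitespace only.
    return predicted_line.strip() == gold_next_line.strip()

# Hypothetical usage with the record shown above:
# exact_match(model_output, "args = get_function_args()")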
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n train_cls=False, pos_prob=0.5):\r\n def __len__(self):\r\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\r\n allow_invisible=False, force_invisible=False):\r\n def __getitem__(self, index):\r\n def getitem(self):\r\n def getitem_cls(self):\r\n def get_center_box(self, H, W, ratio=1 / 8):\r\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\r\n def get_one_search(self):\r\n def get_frame_ids_trident(self, visible):\r\n def get_frame_ids_stark(self, visible, valid):\r\nclass TrackingSampler(torch.utils.data.Dataset):\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = search_frames[0].shape\r" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\r\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None,\r\n joint_transform=None):\r\n def __call__(self, data: TensorDict):\r\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\r\n mode='pair', settings=None, *args, **kwargs):\r\n def _get_jittered_box(self, box, mode):\r\n def __call__(self, data: TensorDict):\r\nclass BaseProcessing:\r\nclass STARKProcessing(BaseProcessing):\r" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\r\n \"\"\"\r\n Data loader. Combines a dataset and a sampler, and provides\r\n single- or multi-process iterators over the dataset.\r\n\r\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\r\n select along which dimension the data should be stacked to form a batch.\r\n\r\n Arguments:\r\n dataset (Dataset): dataset from which to load the data.\r\n batch_size (int, optional): how many samples per batch to load\r\n (default: 1).\r\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\r\n at every epoch (default: False).\r\n sampler (Sampler, optional): defines the strategy to draw samples from\r\n the dataset. If specified, ``shuffle`` must be False.\r\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\r\n indices at a time. Mutually exclusive with batch_size, shuffle,\r\n sampler, and drop_last.\r\n num_workers (int, optional): how many subprocesses to use for data\r\n loading. 0 means that the data will be loaded in the main process.\r\n (default: 0)\r\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\r\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\r\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\r\n into CUDA pinned memory before returning them.\r\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\r\n if the dataset size is not divisible by the batch size. If ``False`` and\r\n the size of dataset is not divisible by the batch size, then the last batch\r\n will be smaller. (default: False)\r\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\r\n from workers. Should always be non-negative. 
(default: 0)\r\n worker_init_fn (callable, optional): If not None, this will be called on each\r\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\r\n input, after seeding and before data loading. (default: None)\r\n\r\n .. note:: By default, each worker will have its PyTorch seed set to\r\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\r\n by main process using its RNG. However, seeds for other libraries\r\n may be duplicated upon initializing workers (w.g., NumPy), causing\r\n each worker to return identical random numbers. (See\r\n :ref:`dataloader-workers-random-seed` section in FAQ.) You may\r\n use ``torch.initial_seed()`` to access the PyTorch seed for each\r\n worker in :attr:`worker_init_fn`, and use it to set other seeds\r\n before data loading.\r\n\r\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\r\n unpicklable object, e.g., a lambda function.\r\n \"\"\"\r\n\r\n __initialized = False\r\n\r\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\r\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\r\n timeout=0, worker_init_fn=None):\r\n if collate_fn is None:\r\n if stack_dim == 0:\r\n collate_fn = ltr_collate\r\n elif stack_dim == 1:\r\n collate_fn = ltr_collate_stack1\r\n else:\r\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\r\n\r\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\r\n num_workers, collate_fn, pin_memory, drop_last,\r\n timeout, worker_init_fn)\r\n\r\n self.name = name\r\n self.training = training\r\n self.epoch_interval = epoch_interval\r\n self.stack_dim = stack_dim\r" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\r\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\r\n try:\r\n im = cv.imread(path, cv.IMREAD_COLOR)\r\n\r\n # convert to rgb and return\r\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\r\n except Exception as e:\r\n print('ERROR: Could not read image \"{}\"'.format(path))\r\n print(e)\r\n return None\r" }, { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\r\n \"\"\" LaSOT dataset.\r\n\r\n Publication:\r\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\r\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\r\n CVPR, 2019\r\n https://arxiv.org/pdf/1809.07845.pdf\r\n\r\n Download the dataset from https://cis.temple.edu/lasot/download.html\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_dir if root is None else root\r\n super().__init__('LaSOT', root, image_loader)\r\n\r\n # Keep a list of all classes\r\n self.class_list = [f for f in os.listdir(self.root)]\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n with open(out_of_view_file, 'r') as f:\r\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def 
_get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\r\n \"\"\" GOT-10k dataset.\r\n\r\n Publication:\r\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\r\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\r\n arXiv:1810.11981, 2018\r\n https://arxiv.org/pdf/1810.11981.pdf\r\n\r\n Download dataset from http://got-10k.aitestunion.com/downloads\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).got10k_dir if root is None else root\r\n super().__init__('GOT10k', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\r\n return sequence_meta_info\r\n\r\n def _read_meta(self, seq_path):\r\n try:\r\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\r\n meta_info = f.readlines()\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\r\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\r\n 'major_class': meta_info[7].split(': ')[-1][:-1],\r\n 'root_class': meta_info[8].split(': ')[-1][:-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n with open(os.path.join(self.root, 'list.txt')) as f:\r\n dir_list = list(csv.reader(f))\r\n dir_list = [dir_name[0] for dir_name in dir_list]\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = 
pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n with open(cover_file, 'r', newline='') as f:\r\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(self.root, self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None, env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_dir if root is None else root\r\n super().__init__('TrackingNet', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root, self.set_ids)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\r\n return self.image_loader(frame_path)\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class 
ImagenetVID(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid\", root, image_loader)\r\n\r\n cache_file = os.path.join(root, 'cache.json')\r\n if os.path.isfile(cache_file):\r\n # If available, load the pre-processed cache file containing meta-info for each sequence\r\n with open(cache_file, 'r') as f:\r\n sequence_list_dict = json.load(f)\r\n\r\n self.sequence_list = sequence_list_dict\r\n else:\r\n # Else process the imagenet annotations and generate the cache file\r\n self.sequence_list = self._process_anno(root)\r\n\r\n with open(cache_file, 'w') as f:\r\n json.dump(self.sequence_list, f)\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return self.image_loader(frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r\n\r\n def _process_anno(self, root):\r\n # Builds individual tracklets\r\n base_vid_anno_path = os.path.join(root, 'Annotations', 
'VID', 'train')\r\n\r\n all_sequences = []\r\n for set in sorted(os.listdir(base_vid_anno_path)):\r\n set_id = int(set.split('_')[-1])\r\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\r\n\r\n vid_id = int(vid.split('_')[-1])\r\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\r\n\r\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\r\n image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\r\n\r\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\r\n for f in anno_files]\r\n\r\n tracklets = {}\r\n\r\n # Find all tracklets along with start frame\r\n for f_id, all_targets in enumerate(objects):\r\n for target in all_targets:\r\n tracklet_id = target.find('trackid').text\r\n if tracklet_id not in tracklets:\r\n tracklets[tracklet_id] = f_id\r\n\r\n for tracklet_id, tracklet_start in tracklets.items():\r\n tracklet_anno = []\r\n target_visible = []\r\n class_name_id = None\r\n\r\n for f_id in range(tracklet_start, len(objects)):\r\n found = False\r\n for target in objects[f_id]:\r\n if target.find('trackid').text == tracklet_id:\r\n if not class_name_id:\r\n class_name_id = target.find('name').text\r\n x1 = int(target.find('bndbox/xmin').text)\r\n y1 = int(target.find('bndbox/ymin').text)\r\n x2 = int(target.find('bndbox/xmax').text)\r\n y2 = int(target.find('bndbox/ymax').text)\r\n\r\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\r\n target_visible.append(target.find('occluded').text == '0')\r\n\r\n found = True\r\n break\r\n if not found:\r\n break\r\n\r\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\r\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\r\n 'target_visible': target_visible, 'image_size': image_size}\r\n all_sequences.append(new_sequence)\r\n\r\n return all_sequences\r" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO', root, image_loader)\r\n\r\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\r\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\r\n\r\n # Load the COCO set.\r\n self.coco_set = COCO(self.anno_path)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n img = self.image_loader(os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n use_lmdb - whether the dataset is stored in lmdb format\r\n \"\"\"\r\n root = env_settings(env_num).got10k_lmdb_dir if root is None else root\r\n super().__init__('GOT10k_lmdb', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n def _read_meta(meta_info):\r\n\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\r\n 'motion_class': meta_info[6].split(': ')[-1],\r\n 'major_class': meta_info[7].split(': ')[-1],\r\n 'root_class': meta_info[8].split(': ')[-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1]})\r\n\r\n return object_meta\r\n\r\n sequence_meta_info = {}\r\n for s in self.sequence_list:\r\n try:\r\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" % s)\r\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\r\n except:\r\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return sequence_meta_info\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n dir_str = decode_str(self.root, 'train/list.txt')\r\n dir_list = dir_str.split('\\n')\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in 
got10k is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # full occlusion and out_of_view files\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n # Read these files\r\n occ_list = list(\r\n map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n occlusion = torch.ByteTensor(occ_list)\r\n cover_list = list(\r\n map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n cover = torch.ByteTensor(cover_list)\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(\"train\", self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_lmdb_dir if root is None else root\r\n super().__init__('LaSOT_lmdb', root, image_loader)\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\r\n self.class_list = []\r\n for ele in class_list:\r\n if ele not in self.class_list:\r\n self.class_list.append(ele)\r\n # Keep a list of all classes\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\r\n occlusion = torch.ByteTensor(occ_list)\r\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\r\n out_of_view = torch.ByteTensor(out_view_list)\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n 
visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\r\n\r\n sequence_list_dict = decode_json(root, \"cache.json\")\r\n self.sequence_list = sequence_list_dict\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid_lmdb'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return decode_img(self.root, frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO_lmdb', root, image_loader)\r\n self.root = root\r\n self.img_pth = 'images/{}{}/'.format(split, version)\r\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\r\n\r\n # Load the COCO set.\r\n print('loading annotations into memory...')\r\n tic = time.time()\r\n coco_json = decode_json(root, self.anno_path)\r\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\r\n\r\n self.coco_set = COCO(coco_json)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n # img = self.image_loader(os.path.join(self.img_pth, path))\r\n img = decode_img(self.root, os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None,env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_lmdb_dir if root is None else root\r\n super().__init__('TrackingNet_lmdb', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Adan", "path": "lib/train/optimizer/anan.py", "snippet": "class Adan(Optimizer):\r\n \"\"\"\r\n Implements a pytorch variant of Adan\r\n Adan was proposed in\r\n Adan: Adaptive Nesterov Momentum Algorithm for\r\n Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022.\r\n https://arxiv.org/abs/2208.06677\r\n Arguments:\r\n params (iterable): iterable of 
parameters to optimize or\r\n dicts defining parameter groups.\r\n lr (float, optional): learning rate. (default: 1e-3)\r\n betas (Tuple[float, float, flot], optional): coefficients used for\r\n first- and second-order moments. (default: (0.98, 0.92, 0.99))\r\n eps (float, optional): term added to the denominator to improve\r\n numerical stability. (default: 1e-8)\r\n weight_decay (float, optional): decoupled weight decay\r\n (L2 penalty) (default: 0)\r\n max_grad_norm (float, optional): value used to clip\r\n global grad norm (default: 0.0 no clip)\r\n no_prox (bool): how to perform the decoupled weight decay\r\n (default: False)\r\n foreach (bool): if True would use torch._foreach implementation.\r\n It's faster but uses slightly more memory. (default: True)\r\n fused (bool, optional): whether fused implementation is used.\r\n (default: False)\r\n\r\n VIT:\r\n 150\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n 300\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n \"\"\"\r\n def __init__(self,\r\n params,\r\n lr=1e-3,\r\n betas=(0.98, 0.92, 0.99),\r\n eps=1e-8,\r\n weight_decay=0.0,\r\n max_grad_norm=0.0,\r\n no_prox=False,\r\n foreach: bool = True,\r\n fused: bool = False):\r\n if not 0.0 <= max_grad_norm:\r\n raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm))\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= eps:\r\n raise ValueError('Invalid epsilon value: {}'.format(eps))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(\r\n betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(\r\n betas[1]))\r\n if not 0.0 <= betas[2] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 2: {}'.format(\r\n betas[2]))\r\n defaults = dict(lr=lr,\r\n betas=betas,\r\n eps=eps,\r\n weight_decay=weight_decay,\r\n max_grad_norm=max_grad_norm,\r\n no_prox=no_prox,\r\n foreach=foreach,\r\n fused=fused)\r\n super().__init__(params, defaults)\r\n\r\n def __setstate__(self, state):\r\n super(Adan, self).__setstate__(state)\r\n for group in self.param_groups:\r\n group.setdefault('no_prox', False)\r\n\r\n @torch.no_grad()\r\n def restart_opt(self):\r\n for group in self.param_groups:\r\n group['step'] = 0\r\n for p in group['params']:\r\n if p.requires_grad:\r\n state = self.state[p]\r\n # State initialization\r\n\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n # Exponential moving average of squared gradient values\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n # Exponential moving average of gradient difference\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\"\"\"\r\n\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n if self.defaults['max_grad_norm'] > 0:\r\n device = self.param_groups[0]['params'][0].device\r\n global_grad_norm = torch.zeros(1, device=device)\r\n\r\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'],\r\n device=device)\r\n for group in self.param_groups:\r\n\r\n for p in group['params']:\r\n if p.grad is not None:\r\n grad = p.grad\r\n global_grad_norm.add_(grad.pow(2).sum())\r\n\r\n global_grad_norm = 
torch.sqrt(global_grad_norm)\r\n\r\n clip_global_grad_norm = torch.clamp(\r\n max_grad_norm / (global_grad_norm + group['eps']),\r\n max=1.0).item()\r\n else:\r\n clip_global_grad_norm = 1.0\r\n\r\n for group in self.param_groups:\r\n params_with_grad = []\r\n grads = []\r\n exp_avgs = []\r\n exp_avg_sqs = []\r\n exp_avg_diffs = []\r\n neg_pre_grads = []\r\n\r\n beta1, beta2, beta3 = group['betas']\r\n # assume same step across group now to simplify things\r\n # per parameter step can be easily support\r\n # by making it tensor, or pass list into kernel\r\n if 'step' in group:\r\n group['step'] += 1\r\n else:\r\n group['step'] = 1\r\n\r\n bias_correction1 = 1.0 - beta1**group['step']\r\n bias_correction2 = 1.0 - beta2**group['step']\r\n bias_correction3 = 1.0 - beta3**group['step']\r\n\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n params_with_grad.append(p)\r\n grads.append(p.grad)\r\n\r\n state = self.state[p]\r\n if len(state) == 0:\r\n state['exp_avg'] = torch.zeros_like(p)\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n if 'neg_pre_grad' not in state or group['step'] == 1:\r\n state['neg_pre_grad'] = p.grad.clone().mul_(\r\n -clip_global_grad_norm)\r\n\r\n exp_avgs.append(state['exp_avg'])\r\n exp_avg_sqs.append(state['exp_avg_sq'])\r\n exp_avg_diffs.append(state['exp_avg_diff'])\r\n neg_pre_grads.append(state['neg_pre_grad'])\r\n\r\n kwargs = dict(\r\n params=params_with_grad,\r\n grads=grads,\r\n exp_avgs=exp_avgs,\r\n exp_avg_sqs=exp_avg_sqs,\r\n exp_avg_diffs=exp_avg_diffs,\r\n neg_pre_grads=neg_pre_grads,\r\n beta1=beta1,\r\n beta2=beta2,\r\n beta3=beta3,\r\n bias_correction1=bias_correction1,\r\n bias_correction2=bias_correction2,\r\n bias_correction3_sqrt=math.sqrt(bias_correction3),\r\n lr=group['lr'],\r\n weight_decay=group['weight_decay'],\r\n eps=group['eps'],\r\n no_prox=group['no_prox'],\r\n clip_global_grad_norm=clip_global_grad_norm,\r\n )\r\n\r\n if group['foreach']:\r\n if group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_multi_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _multi_tensor_adan(**kwargs)\r\n elif group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_single_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _single_tensor_adan(**kwargs)\r\n\r\n return loss\r" }, { "identifier": "Lion", "path": "lib/train/optimizer/lion.py", "snippet": "class Lion(Optimizer):\r\n r\"\"\"Implements Lion algorithm.\"\"\"\r\n\r\n def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):\r\n \"\"\"Initialize the hyperparameters.\r\n\r\n Args:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float, optional): learning rate (default: 1e-4)\r\n betas (Tuple[float, float], optional): coefficients used for computing\r\n running averages of gradient and its square (default: (0.9, 0.99))\r\n weight_decay (float, optional): weight decay coefficient (default: 0)\r\n \"\"\"\r\n\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))\r\n defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)\r\n super().__init__(params, defaults)\r\n\r\n @torch.no_grad()\r\n def 
step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Args:\r\n closure (callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n\r\n Returns:\r\n the loss.\r\n \"\"\"\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n\r\n # Perform stepweight decay\r\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\r\n\r\n grad = p.grad\r\n state = self.state[p]\r\n # State initialization\r\n if len(state) == 0:\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n\r\n exp_avg = state['exp_avg']\r\n beta1, beta2 = group['betas']\r\n\r\n # Weight update\r\n update = exp_avg * beta1 + grad * (1 - beta1)\r\n p.add_(torch.sign(update), alpha=-group['lr'])\r\n # Decay the momentum running average coefficient\r\n exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)\r\n\r\n return loss" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\r\n return get_rank() == 0\r" } ]
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.optimizer.anan import Adan
from lib.train.optimizer.lion import Lion
from lib.utils.misc import is_main_process
20,468
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE
    settings.save_interval = cfg.TRAIN.SAVE_INTERVAL


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
        #                 "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE
    settings.save_interval = cfg.TRAIN.SAVE_INTERVAL


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
        #                 "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader,
10
2023-10-08 11:44:32+00:00
24k
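The metadata values above (token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at, level) close out this record. As a rough illustration of how the fields fit together, the sketch below assembles a completion prompt from one such record and scores a model's first generated line against next_line. It assumes the record has been loaded as a plain Python dict keyed by the field names shown in this dump; both helper functions are hypothetical and not an evaluation harness shipped with the dataset.

def build_prompt(record, use_context=True):
    # Concatenate the retrieved context snippets, the file's imports, and the cropped code.
    parts = []
    if use_context:
        for item in record["context"]:  # each item carries "identifier", "path", "snippet"
            parts.append(f"# {item['path']} :: {item['identifier']}\n{item['snippet']}")
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)

def next_line_exact_match(prediction, record):
    # Compare the first generated line against the gold next_line.
    pred_lines = prediction.strip().splitlines()
    first = pred_lines[0].strip() if pred_lines else ""
    return first == record["next_line"].strip()

# Usage (hypothetical): prompt = build_prompt(record); generate with a code model,
# then score with next_line_exact_match(output, record). gold_snippet_index (10 in the
# record above) appears to mark which context item supplies the definition needed for
# next_line; treat that reading as an assumption, not documented behavior.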
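The context list of this record also carries full implementations of the Adan and Lion optimizers, whose docstrings spell out default hyperparameters but show no call site. The sketch below is a minimal, hypothetical usage example: the import paths are taken from the record's import_statement, the toy linear model stands in for the actual tracking network, and the hyperparameter values follow the defaults quoted in the snippets.

import torch
import torch.nn as nn
from lib.train.optimizer.lion import Lion    # path as listed in the record's import_statement
# from lib.train.optimizer.anan import Adan  # alternative, e.g. Adan(params, lr=1e-3, betas=(0.98, 0.92, 0.99), weight_decay=0.02)

model = nn.Linear(128, 10)                   # toy stand-in for the tracker backbone/head
optimizer = Lion(model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0)

x = torch.randn(32, 128)
y = torch.randint(0, 10, (32,))
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
optimizer.step()        # sign-of-momentum update, as implemented in the Lion snippet
optimizer.zero_grad()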
LiyaoTang/ERDA
utils/trainer.py
[ { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path += [] if root in sys.path or os.path.realpath(root) in sys.path else [root]\n from config.base import Base as base\n\n print(f'\\n{prefix}<<< ======= {config._cls} ======= {title if title else config.name}', file=f_out)\n max_len = max([len(k) for k in dir(config) if not k.startswith('_')] + [0])\n for k in config.keys(): # dir would sort\n # if k.startswith('_') or _is_method(getattr(config, k)):\n # continue\n cur_attr = getattr(config, k)\n if isinstance(cur_attr, list) and len(str(cur_attr)) > 200: # overlong list\n cur_attr = '[' + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in cur_attr]) + f'\\n{prefix}\\t]'\n\n print('\\t%s%s\\t= %s' % (prefix + k, ' ' * (max_len-len(k)), str(cur_attr)), file=f_out)\n if is_config(cur_attr, base=base):\n log_config(cur_attr, f_out=f_out, prefix=prefix+'\\t', base=base)\n print('\\n', file=f_out, flush=True)" }, { "identifier": "print_dict", "path": "utils/logger.py", "snippet": "def print_dict(d, prefix='', except_k=[], fn=None, head=None, dict_type=(dict,), list_type=(list, tuple), expand_len=120):\n if head is not None:\n d = {head: d}\n for k, v in d.items():\n if k in except_k:\n continue\n if isinstance(d[k], dict_type):\n print(f'{prefix}{str(k)}:')\n print_dict(d[k], prefix=f'{prefix}\\t', except_k=except_k, fn=fn, expand_len=120)\n else:\n if fn:\n rst = None\n try:\n if isinstance(v, list_type):\n rst = v.__class__([fn(vv) for vv in v])\n else:\n rst = fn(v)\n except:\n pass\n v = rst if rst else v\n line = f'{prefix}{str(k)}\\t{str(v)}'\n if isinstance(v, list_type) and expand_len and len(str(line)) > expand_len: # overlong\n line_pre = f'{prefix}{str(k)}\\t' + ('[' if isinstance(v, list) else '(')\n line_post = f'\\n{prefix}\\t' + (']' if isinstance(v, list) else ')')\n if set(dict_type).issuperset(set([type(s) for s in v])): # all dict in list\n print(line_pre)\n for s in v[:-1]:\n print_dict(s, prefix=f'{prefix}\\t\\t')\n print(f'{prefix}\\t\\t,')\n print_dict(v[-1], prefix=f'{prefix}\\t\\t')\n line = line_post\n else:\n line = line_pre + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in v]) + line_post\n\n print(line)" }, { "identifier": "print_table", "path": "utils/logger.py", "snippet": "def print_table(t, prefix='', sep=' '): # assume a 2D-list\n max_len = np.array([[len(str(ii)) for ii in l] for l in t], dtype=int).max(axis=0)\n for line in t:\n print(prefix + sep.join([str(ii) + ' ' * (max_len[i] - len(str(ii))) for i, ii in enumerate(line)]))" }, { "identifier": "read_ply", "path": "utils/ply.py", "snippet": "def read_ply(filename, triangular_mesh=False):\n \"\"\"\n Read \".ply\" files\n\n Parameters\n ----------\n filename : string\n the name of the file to read.\n\n Returns\n -------\n result : array\n data stored in the file\n\n Examples\n --------\n Store data in file\n\n >>> points = np.random.rand(5, 3)\n >>> values = np.random.randint(2, size=10)\n >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])\n\n Read the file\n\n >>> data = read_ply('example.ply')\n >>> values = data['values']\n array([0, 0, 1, 1, 0])\n \n >>> points = np.vstack((data['x'], data['y'], data['z'])).T\n array([[ 0.466 0.595 0.324]\n [ 0.538 0.407 0.654]\n [ 0.850 0.018 0.988]\n [ 0.395 0.394 0.363]\n [ 0.873 0.996 0.092]])\n\n \"\"\"\n\n with 
open(filename, 'rb') as plyfile:\n\n\n # Check if the file start with ply\n if b'ply' not in plyfile.readline():\n raise ValueError('The file does not start whith the word ply')\n\n # get binary_little/big or ascii\n fmt = plyfile.readline().split()[1].decode()\n if fmt == \"ascii\":\n raise ValueError('The file is not binary')\n\n # get extension for building the numpy dtypes\n ext = valid_formats[fmt]\n\n # PointCloud reader vs mesh reader\n if triangular_mesh:\n\n # Parse header\n num_points, num_faces, properties = parse_mesh_header(plyfile, ext)\n\n # Get point data\n vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)\n\n # Get face data\n face_properties = [('k', ext + 'u1'),\n ('v1', ext + 'i4'),\n ('v2', ext + 'i4'),\n ('v3', ext + 'i4')]\n faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)\n\n # Return vertex data and concatenated faces\n faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T\n data = [vertex_data, faces]\n\n else:\n\n # Parse header\n num_points, properties = parse_header(plyfile, ext)\n\n # Get data\n data = np.fromfile(plyfile, dtype=properties, count=num_points)\n\n return data" }, { "identifier": "write_ply", "path": "utils/ply.py", "snippet": "def write_ply(filename, field_list, field_names, triangular_faces=None):\n \"\"\"\n Write \".ply\" files\n\n Parameters\n ----------\n filename : string\n the name of the file to which the data is saved. A '.ply' extension will be appended to the \n file name if it does no already have one.\n\n field_list : list, tuple, numpy array\n the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a \n tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered \n as one field. \n\n field_names : list\n the name of each fields as a list of strings. 
Has to be the same length as the number of \n fields.\n\n Examples\n --------\n >>> points = np.random.rand(10, 3)\n >>> write_ply('example1.ply', points, ['x', 'y', 'z'])\n\n >>> values = np.random.randint(2, size=10)\n >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])\n\n >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)\n >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', values']\n >>> write_ply('example3.ply', [points, colors, values], field_names)\n\n \"\"\"\n\n # Format list input to the right form\n field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))\n for i, field in enumerate(field_list):\n if field.ndim < 2:\n field_list[i] = field.reshape(-1, 1)\n if field.ndim > 2:\n print('fields have more than 2 dimensions')\n return False \n\n # check all fields have the same number of data\n n_points = [field.shape[0] for field in field_list]\n if not np.all(np.equal(n_points, n_points[0])):\n print('wrong field dimensions')\n return False \n\n # Check if field_names and field_list have same nb of column\n n_fields = np.sum([field.shape[1] for field in field_list])\n if (n_fields != len(field_names)):\n print('wrong number of field names')\n return False\n\n # Add extension if not there\n if not filename.endswith('.ply'):\n filename += '.ply'\n\n # open in text mode to write the header\n with open(filename, 'w') as plyfile:\n\n # First magical word\n header = ['ply']\n\n # Encoding format\n header.append('format binary_' + sys.byteorder + '_endian 1.0')\n\n # Points properties description\n header.extend(header_properties(field_list, field_names))\n\n # Add faces if needded\n if triangular_faces is not None:\n header.append('element face {:d}'.format(triangular_faces.shape[0]))\n header.append('property list uchar int vertex_indices')\n\n # End of header\n header.append('end_header')\n\n # Write all lines\n for line in header:\n plyfile.write(\"%s\\n\" % line)\n\n # open in binary/append to use tofile\n with open(filename, 'ab') as plyfile:\n\n # Create a structured array\n i = 0\n type_list = []\n for fields in field_list:\n for field in fields.T:\n type_list += [(field_names[i], field.dtype.str)]\n i += 1\n data = np.empty(field_list[0].shape[0], dtype=type_list)\n i = 0\n for fields in field_list:\n for field in fields.T:\n data[field_names[i]] = field\n i += 1\n\n data.tofile(plyfile)\n\n if triangular_faces is not None:\n triangular_faces = triangular_faces.astype(np.int32)\n type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]\n data = np.empty(triangular_faces.shape[0], dtype=type_list)\n data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)\n data['0'] = triangular_faces[:, 0]\n data['1'] = triangular_faces[:, 1]\n data['2'] = triangular_faces[:, 2]\n data.tofile(plyfile)\n\n return True" }, { "identifier": "ModelTester", "path": "utils/tester.py", "snippet": "class ModelTester:\n\n # Initiation methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, config, verbose=True):\n self.config = config\n self.verbose = verbose\n\n self.save_extra = {} # for saving with extra ops\n\n if config.dataset in ['S3DIS', 'ScanNet', 'SensatUrban']:\n self.val_running_vote = self.val_running_vote_seg\n self.val_vote = self.val_vote_seg\n self.test_vote = self.test_vote_seg\n else:\n raise NotImplementedError(f'not supported dataset: {config.dataset}')\n\n def 
init_pointcloud_log(self, dataset, split, d, dtype=np.float32, init_fn=np.zeros):\n shape = lambda l: [l, d] if d else [l] # d - size of last dimension => each point d-dim [N, d] (d = None to have [N])\n log = [init_fn(shape=shape(t.data.shape[0]), dtype=dtype) for t in dataset.input_trees[split]]\n return log\n\n def initialize(self, ops, dataset, model, split):\n # initialize cum_dict & ops\n config = self.config\n ncls = config.num_classes\n\n run_ops = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # assumes per-gpu rst - support multi-gpu\n cum_dict = {\n 'prob': self.init_pointcloud_log(dataset, split, ncls)\n }\n\n extra_ops = [k for k in config.extra_ops.split('-') if k]\n extra_ops_solved = extra_ops.copy()\n for k in extra_ops:\n if k in ['prob', 'conf']:\n continue\n else:\n raise ValueError(f'not supported extra ops k = {k} from {config.extra_ops}')\n\n return run_ops, cum_dict, extra_ops_solved\n\n # Val methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def val_running_vote_seg(self, sess, ops, dataset, model, validation_probs, epoch=1):\n \"\"\"\n One epoch validating - running voting used during training, main task results only\n \"\"\"\n\n val_smooth = 0.95 # Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing)\n\n result_dict = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # result dict for seg\n val_ops = {'loss_dict': ops['loss_dict'], 'result_dict': result_dict}\n feed_dict = {ops['is_training']: False}\n\n # Initialise iterator\n sess.run(ops['val_init_op'])\n\n ep = 0\n loss_meter = {k: AverageMeter() for k in val_ops['loss_dict']} if 'loss_dict' in val_ops else{}\n cum_dict = {\n 'conf': 0, # conf from current validation\n 'prob': validation_probs, # accumulating probs\n }\n while ep < epoch:\n try:\n rst = sess.run(val_ops, feed_dict=feed_dict)\n\n loss_dict = rst['loss_dict'] if 'loss_dict' in rst else {}\n cur_rst = rst['result_dict'] # per-gpu result\n\n for k, v in loss_dict.items():\n loss_meter[k].update(v)\n\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except tf.errors.OutOfRangeError:\n ep += 1\n pass\n\n if loss_meter:\n print(f'val loss avg:', ' '.join([f'{loss_n} = {meter.avg:.3f}' for loss_n, meter in loss_meter.items()]))\n\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n cur_m = metrics_from_confusions(cum_dict['conf'], proportions=proportions) # use sampled pred-label of current epoch\n vote_m = metrics_from_result(validation_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions) # use the accumulated per-point voting\n\n print(f'metrics - current {cur_m}\\n'\n f' - accumulated {vote_m}', flush=True)\n return cur_m\n\n\n def val_vote_seg(self, sess, ops, dataset, model, num_votes=20):\n \"\"\"\n Voting validating\n \"\"\"\n\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n val_smooth = 0.95\n\n # Initialise iterator with val data\n sess.run(ops['val_init_op'])\n\n # Initiate global prediction over val clouds\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n val_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'validation')\n val_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5\n if self.config.debug:\n 
print_dict(val_ops, head='val_vote_seg - val_ops')\n while last_min < num_votes:\n try:\n cur_rst = sess.run(val_ops, feed_dict=feed_dict)\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except tf.errors.OutOfRangeError:\n new_min = np.min(dataset.min_potentials['validation'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n if self.verbose > 1:\n # Show vote results on subcloud (match original label to valid) => not the good values here\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds: ', vote_m.scalar_str)\n\n if self.verbose > 1 and int(np.ceil(new_min)) % 2 == 0:\n # Project predictions\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n print('==> Confusion on full clouds:', vote_m)\n\n sess.run(ops['val_init_op'])\n vote_ind += 1\n\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds - final: ', vote_m.scalar_str)\n\n # Project predictions\n print('==> Confusion on full clouds - final:')\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n vote_m.print()\n print('\\nfinished\\n', flush=True)\n\n return\n\n\n # Test methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def test_classification(self, model, dataset, num_votes=100):\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Number of classes predicted by the model\n nc_model = config.num_classes\n\n # Initiate votes\n average_probs = np.zeros((len(dataset.input_labels['test']), nc_model))\n average_counts = np.zeros((len(dataset.input_labels['test']), nc_model))\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n while np.min(average_counts) < num_votes:\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n probs = []\n targets = []\n obj_inds = []\n count = 0\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits, model.labels, model.inputs['object_inds'])\n prob, labels, inds = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Get probs and labels\n probs += [prob]\n targets += [labels]\n obj_inds += [inds]\n count += prob.shape[0]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(np.min(average_counts),\n 100 * count / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Average votes\n # *************\n\n # Stack all validation predictions\n probs = np.vstack(probs)\n targets = np.hstack(targets)\n obj_inds = np.hstack(obj_inds)\n\n if 
np.any(dataset.input_labels['test'][obj_inds] != targets):\n raise ValueError('wrong object indices')\n\n # Compute incremental average (predictions are always ordered)\n average_counts[obj_inds] += 1\n average_probs[obj_inds] += (probs - average_probs[obj_inds]) / (average_counts[obj_inds])\n\n # Save/Display temporary results\n # ******************************\n\n test_labels = np.array(dataset.label_values)\n\n # Compute classification results\n C1 = confusion_matrix(dataset.input_labels['test'],\n np.argmax(average_probs, axis=1),\n test_labels)\n\n ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)\n print('Test Accuracy = {:.1f}%'.format(ACC))\n\n s = ''\n for cc in C1:\n for c in cc:\n s += '{:d} '.format(c)\n s += '\\n'\n print(s)\n\n\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_multi_segmentation(self, model, dataset, num_votes=100, num_saves=10):\n\n ##################\n # Pre-computations\n ##################\n\n print('Preparing test structures')\n t1 = time.time()\n\n # Collect original test file names\n original_path = join(dataset.path, 'test_ply')\n test_names = [f[:-4] for f in listdir(original_path) if f[-4:] == '.ply']\n test_names = np.sort(test_names)\n\n original_labels = []\n original_points = []\n projection_inds = []\n for i, cloud_name in enumerate(test_names):\n\n # Read data in ply file\n data = read_ply(join(original_path, cloud_name + '.ply'))\n points = np.vstack((data['x'], -data['z'], data['y'])).T\n original_labels += [data['label'] - 1]\n original_points += [points]\n\n # Create tree structure to compute neighbors\n tree = KDTree(dataset.input_points['test'][i])\n projection_inds += [np.squeeze(tree.query(points, return_distance=False))]\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n ##########\n # Initiate\n ##########\n\n # Test saving path\n if config.save_test:\n test_path = join(model.saving_path, 'test')\n if not exists(test_path):\n makedirs(test_path)\n else:\n test_path = None\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Initiate result containers\n average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]\n\n #####################\n # Network predictions\n #####################\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n for v in range(num_votes):\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n all_predictions = []\n all_obj_inds = []\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits,\n model.labels,\n model.inputs['super_labels'],\n model.inputs['object_inds'],\n model.inputs['in_batches'])\n preds, labels, obj_labels, o_inds, batches = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Stack all predictions for each class separately\n max_ind = np.max(batches)\n for b_i, b in enumerate(batches):\n\n # Eliminate shadow indices\n b = b[b < max_ind - 0.5]\n\n # Get prediction (only for the concerned parts)\n obj = obj_labels[b[0]]\n predictions = preds[b][:, :config.num_classes[obj]]\n\n # Stack all results\n all_predictions += [predictions]\n all_obj_inds += [o_inds[b_i]]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(v,\n 
100 * len(all_predictions) / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Project predictions on original point clouds\n # ********************************************\n\n print('\\nGetting test confusions')\n t1 = time.time()\n\n for i, probs in enumerate(all_predictions):\n\n # Interpolate prediction from current positions to original points\n obj_i = all_obj_inds[i]\n proj_predictions = probs[projection_inds[obj_i]]\n\n # Average prediction across votes\n average_predictions[obj_i] = average_predictions[obj_i] + \\\n (proj_predictions - average_predictions[obj_i]) / (v + 1)\n\n Confs = []\n for obj_i, avg_probs in enumerate(average_predictions):\n\n # Compute confusion matrices\n parts = [j for j in range(avg_probs.shape[1])]\n Confs += [confusion_matrix(original_labels[obj_i], np.argmax(avg_probs, axis=1), parts)]\n\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Save the best/worst segmentations per class\n # *******************************************\n\n print('Saving test examples')\n t1 = time.time()\n\n # Regroup confusions per object class\n Confs = np.array(Confs)\n obj_mIoUs = []\n for l in dataset.label_values:\n\n # Get confusions for this object\n obj_inds = np.where(dataset.input_labels['test'] == l)[0]\n obj_confs = np.stack(Confs[obj_inds])\n\n # Get IoU\n obj_IoUs = IoU_from_confusions(obj_confs)\n obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]\n\n # Get X best and worst prediction\n order = np.argsort(obj_mIoUs[-1])\n worst_inds = obj_inds[order[:num_saves]]\n best_inds = obj_inds[order[:-num_saves-1:-1]]\n worst_IoUs = obj_IoUs[order[:num_saves]]\n best_IoUs = obj_IoUs[order[:-num_saves-1:-1]]\n\n # Save the names in a file\n if config.save_test:\n obj_path = join(test_path, dataset.label_to_names[l])\n if not exists(obj_path):\n makedirs(obj_path)\n worst_file = join(obj_path, 'worst_inds.txt')\n best_file = join(obj_path, 'best_inds.txt')\n with open(worst_file, \"w\") as text_file:\n for w_i, w_IoUs in zip(worst_inds, worst_IoUs):\n text_file.write('{:d} {:s} :'.format(w_i, test_names[w_i]))\n for IoU in w_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n with open(best_file, \"w\") as text_file:\n for b_i, b_IoUs in zip(best_inds, best_IoUs):\n text_file.write('{:d} {:s} :'.format(b_i, test_names[b_i]))\n for IoU in b_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n # Save the clouds\n for i, w_i in enumerate(worst_inds):\n filename = join(obj_path, 'worst_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[w_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[w_i], original_labels[w_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n for i, b_i in enumerate(best_inds):\n filename = join(obj_path, 'best_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[b_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[b_i], original_labels[b_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Display results\n # ***************\n\n objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]\n instance_average = np.mean(np.hstack(obj_mIoUs))\n class_average = np.mean(objs_average)\n\n print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')\n print('-----|------|--------------------------------------------------------------------------------')\n\n s = '{:4.1f} | {:4.1f} | 
'.format(100 * class_average, 100 * instance_average)\n for AmIoU in objs_average:\n s += '{:4.1f} '.format(100 * AmIoU)\n print(s + '\\n')\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_vote_seg(self, sess, ops, dataset, model, num_votes=20, test_path=None, make_zip=True):\n\n config = self.config\n assert os.path.isdir(config.saving_path), f'not a dir: {config.saving_path}'\n if test_path is None:\n test_path = os.path.join(config.saving_path, 'test')\n os.makedirs(test_path, exist_ok=True)\n\n options = None # tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = None # tf.RunMetadata()\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n test_smooth = 0.98\n\n # Initialise iterator with test data\n sess.run(ops['test_init_op'])\n\n # Initiate global prediction over val clouds\n test_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'test')\n test_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5 \n if config.num_votes:\n num_votes = config.num_votes\n while last_min < num_votes:\n try:\n cur_rst = sess.run(test_ops, feed_dict=feed_dict, options=options, run_metadata=run_metadata)\n # Stack all test predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=test_smooth)\n\n except tf.errors.OutOfRangeError:\n # NOTE: need to check\n new_min = np.min(dataset.min_potentials['test'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n # if int(last_min) > 0 and int(last_min) // 5 == 0: # periodic test results\n # self.project_test_predictions(dataset, test_path)\n\n sess.run(ops['test_init_op'])\n vote_ind += 1\n\n if self.verbose:\n new_min = np.min(dataset.min_potentials['test'])\n print(f'Step {vote_ind:3d}, end. 
Min potential = {new_min:.1f}', flush=True)\n\n self.project_test_predictions(dataset, test_probs, test_path)\n print('\\nfinished\\n', flush=True)\n\n if make_zip:\n zip_name = test_path.split(os.sep) # cfg name / Log_* / test_*\n zip_name = '_'.join([i for i in ['test', *zip_name[-3:-1], zip_name[-1][len('test'):].strip('_')] if i])\n # include test_* dir (except Semantic3D, ScanNet)\n j = 'j' if config.dataset in ['ScanNet', 'Semantic3D', 'SensatUrban'] else ''\n os.system(f'cd {os.path.dirname(test_path)}; zip -rmTq{j} {zip_name}.zip {test_path.split(os.sep)[-1]}/*') # -m to move, -j junk file, -T test integrity, -q quiet\n os.system(f'rm -r {test_path}')\n return\n\n def project_test_predictions(self, dataset, test_probs, test_path):\n\n # Project predictions\n t1 = time.time()\n files = dataset.test_files\n ignored_inds = None\n if hasattr(dataset, 'ignored_labels_test'):\n ignored_inds = dataset.label_to_idx[[l for l in dataset.ignored_labels_test if l not in dataset.ignored_labels]].astype(int)\n\n config = self.config\n if config.save_test:\n pred_path = os.sep.join([*test_path.split(os.sep)[:-1], test_path.split(os.sep)[-1].replace('test', 'predictions')]) # model pred\n os.makedirs(pred_path, exist_ok=True)\n\n for i_test, file_path in enumerate(files):\n\n # Reproject probs\n probs = test_probs[i_test][dataset.test_proj[i_test], :]\n\n # Remove invalid classes in test\n if ignored_inds is not None:\n probs[:, ignored_inds] = 0\n\n # Get the predicted labels\n preds = dataset.idx_to_label[np.argmax(probs, axis=-1)]\n\n # Save plys - predictions & probs\n cloud_name = file_path.split('/')[-1]\n if config.save_test:\n points = dataset.load_evaluation_points(file_path) # test original points\n pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]] # project potentials on original points\n test_name = os.path.join(pred_path, cloud_name)\n prob_names = ['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values if label not in dataset.ignored_labels]\n write_ply(test_name,\n [points, preds, pots, probs],\n ['x', 'y', 'z', 'preds', 'pots'] + prob_names)\n\n # Save ascii preds - submission files\n if config.dataset == 'Semantic3D':\n ascii_name = os.path.join(test_path, dataset.ascii_files[cloud_name])\n np.savetxt(ascii_name, preds, fmt='%d')\n elif config.dataset == 'SensatUrban':\n ascii_name = os.path.join(test_path, f'{cloud_name[:-4]}.label')\n preds.astype(np.uint8).tofile(ascii_name)\n else:\n ascii_name = os.path.join(test_path, cloud_name[:-4] + '.txt')\n np.savetxt(ascii_name, preds, fmt='%d')\n\n t2 = time.time()\n if self.verbose:\n print('\\nReproject Vote in {:.1f}s\\n'.format(t2-t1))\n\n\n # Utilities\n # ------------------------------------------------------------------------------------------------------------------\n\n def cumulate_probs(self, dataset, model, rst, cum_dict, task, smooth):\n # cum_dict - {cum_dict name : {args : rst_dict}}\n\n # iterate over gpu\n for gpu_i, cloud_inds in enumerate(rst['inputs']['cloud_inds']):\n point_inds = rst['inputs']['point_inds'][gpu_i]\n\n b_start = 0\n # iterate over clouds\n for b_i, c_i in enumerate(cloud_inds): # [B]\n if 'batches_len' in rst['inputs']: # [BxN] - stacked\n b_len = rst['inputs']['batches_len'][gpu_i][0][b_i] # npoints in cloud\n b_i = np.arange(b_start, b_start + b_len)\n b_start += b_len\n else: # [B, N] - batched\n pass\n inds = point_inds[b_i] # input point inds\n\n probs = rst[task]['probs'][gpu_i][b_i]\n labels = rst[task]['labels'][gpu_i][b_i]\n if np.all(labels == 
-1):\n # c_pts = np.array(dataset.input_trees['validation'][c_i].data, copy=False)[inds].mean(axis=0)\n # unique_l_cnt = np.unique(dataset.input_labels['validation'][c_i][inds], return_counts=True)\n # raise ValueError(f'all invalid labels found in cumulate_prob: cloud_inds={c_i}, center_pts={c_pts}'\n # f'input_labels & counts - {unique_l_cnt}')\n continue\n if 'conf' in cum_dict:\n cur_conf = confusion_matrix(labels, np.argmax(probs, axis=-1).astype(np.int), labels=np.arange(dataset.num_classes))\n cum_dict['conf'] += cur_conf\n if 'prob' in cum_dict:\n cum_dict['prob'][c_i][inds] = smooth * cum_dict['prob'][c_i][inds] + (1 - smooth) * probs\n if 'feature' in cum_dict:\n cum_dict['feature'][c_i][inds] = smooth * cum_dict['feature'][c_i][inds] + (1 - smooth) * rst[task]['latent'][gpu_i][b_i]\n\n def _search_func(self, k_r, cloud_idx, split, dataset, neighbor_dict, verbose=True): # create tf_ops of generating neighbor_idx & get result\n if cloud_idx in neighbor_dict[k_r]:\n return neighbor_dict[k_r][cloud_idx]\n\n config = self.config\n points = np.array(dataset.input_trees[split][cloud_idx].data, copy=False) # [N, 3]\n\n from ops import get_tf_func\n func = get_tf_func(config.search, verbose=verbose)\n\n if config.search in ['knn']:\n tf_ops = tf.squeeze(func(points[None, ...], points[None, ...], k_r), axis=0)\n elif config.search in ['radius']:\n tf_ops = func(points, points, [len(points)], [len(points)], k_r)\n # if hasattr(dataset, 'neighborhood_limits'):\n # print('neighborhood_limits', dataset.neighborhood_limits[0])\n # tf_ops = tf_ops[..., :dataset.neighborhood_limits[0]]\n else:\n raise\n\n if verbose:\n print_mem(f'k = {k_r} - start', check_time=True, check_sys=True, flush=True)\n with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)) as s:\n neighbor_idx = s.run(tf_ops)\n if verbose:\n print_mem(f'neighbor_idx {neighbor_idx.shape}', check_time=True, check_sys=True, flush=True)\n\n neighbor_dict[k_r][cloud_idx] = neighbor_idx # neighbor idx - np arr\n return neighbor_idx" }, { "identifier": "average_gradients", "path": "utils/average_gradients.py", "snippet": "def average_gradients(tower_grads, grad_norm, raise_on_none=True, grad_reduce=None, device=None):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n From tensorflow tutorial: cifar10/cifar10_multi_gpu_train.py\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n - [[(g,v), ... at gpu 0], ..., [(g,v), ... at gpu N]]\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n if device:\n with tf.device(device):\n return average_gradients(tower_grads, grad_norm, raise_on_none, grad_reduce, None)\n\n use_clip = grad_norm and grad_norm > 0\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars containes (grad, var) calculated at each gpu, looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... 
, (grad0_gpuN, var0_gpuN))\n grads = []\n for g, v in grad_and_vars:\n if g is not None:\n if use_clip:\n g = tf.clip_by_norm(g, grad_norm)\n elif raise_on_none:\n raise ValueError(f'variable {v} got None gradients')\n else:\n continue\n # g = tf.zeros_like(v)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(g)\n\n # Average over the 'tower' dimension.\n if len(grads) > 1 and (grad_reduce == 'concat' or not grad_reduce):\n # Add 0 dimension to the gradients to represent the tower.\n # grad = tf.stack(grads)\n grads = [tf.expand_dims(g, 0) for g in grads]\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n elif len(grads) > 1 and grad_reduce == 'mean':\n # Direct mean\n grad = tf.accumulate_n(grads) / len(grads)\n elif len(grads) == 1:\n # skip if only 1 gpu\n grad = grads[0]\n elif len(grads) == 0:\n grad = None\n else:\n raise ValueError(f'not support grad_reduce = {grad_reduce}')\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads" }, { "identifier": "AdamWeightDecayOptimizer", "path": "utils/AdamWOptimizer.py", "snippet": "class AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name" }, { "identifier": "setup_logger", "path": "utils/logger.py", "snippet": "@functools.lru_cache()\ndef setup_logger(\n output=None, distributed_rank=0, *, color=True, name=\"\", abbrev_name=None\n):\n \"\"\"\n Initialize the detectron2 logger and set its verbosity level to \"INFO\".\n\n Args:\n output (str): a file name or a directory to save log. If None, will not save log file.\n If ends with \".txt\" or \".log\", assumed to be a file name.\n Otherwise, logs will be saved to `output/log.txt`.\n name (str): the root module name of this logger\n\n Returns:\n logging.Logger: a logger\n \"\"\"\n logger = logging.getLogger(name) # a global named logger\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n\n if abbrev_name is None:\n abbrev_name = name\n\n plain_formatter = logging.Formatter(\n \"[%(asctime)s] %(name)s %(levelname)s: %(message)s\", datefmt=\"%m/%d %H:%M:%S\"\n )\n # stdout logging: master only\n if distributed_rank == 0:\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.DEBUG)\n if color:\n formatter = _ColorfulFormatter(\n colored(\"[%(asctime)s %(name)s]: \", \"green\") + \"%(message)s\",\n datefmt=\"%m/%d %H:%M:%S\",\n root_name=name,\n abbrev_name=str(abbrev_name),\n )\n else:\n formatter = plain_formatter\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # file logging: all workers\n if output is not None:\n if output.endswith(\".txt\") or output.endswith(\".log\"):\n filename = output\n else:\n filename = os.path.join(output, \"log.txt\")\n if distributed_rank > 0:\n filename = filename + f\".rank{distributed_rank}\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n fh = logging.StreamHandler(_cached_log_stream(filename))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(plain_formatter)\n logger.addHandler(fh)\n\n return logger" }, { "identifier": "StepScheduler", "path": "utils/scheduler.py", "snippet": "class StepScheduler(object):\n def __init__(self, name, base_value, decay_rate, decay_step, max_steps, clip_min=0):\n self.name = name\n self.clip_min = clip_min\n self.cur_step = 0\n self.values = [base_value * decay_rate ** (i // decay_step) for i in range(max_steps)]\n\n def reset(self):\n self.cur_step = 0\n\n def step(self):\n # cur_value = self.base_value * self.decay_rate ** (cur_step // decay_step)\n cur_value = max(self.values[self.cur_step], self.clip_min)\n self.cur_step += 1\n return cur_value" }, { "identifier": "LrScheduler", "path": "utils/scheduler.py", "snippet": "class LrScheduler(object):\n def __init__(self, config):\n self.config = config\n self.start_lr = 
float(config.learning_rate)\n self.clip_min = config.clip_min if config.clip_min else 0\n\n self.decay = config.decay\n if self.decay.startswith('cos'):\n self._get_lr = self._get_lr_cos\n\n self.reset()\n\n # from matplotlib import pyplot as plt\n # plt.plot(self.to_list(config.max_epoch))\n # plt.savefig(config.name)\n\n def reset(self):\n self.cur_ep = 0\n self.cur_step = 0\n self.learning_rate = None # None to denote not initalized\n self.learning_rate = self._get_lr()\n\n def _get_lr_cos(self):\n # simple implementation for cos annealing (epoch based)\n # borrowing from https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup/blob/master/cosine_annealing_warmup/scheduler.py\n # e.g. cos_w10, cos_w10_c3_m2_g.5\n cfg = self.config\n cur_ep = self.cur_ep\n total_ep = cfg.max_epoch\n max_lr = self.start_lr\n base_lr = self.clip_min if self.clip_min > 0 else 1e-5 # starting lr (min)\n\n warm_ep = re.search('w\\d+', self.decay)\n warm_ep = float(warm_ep.group()[1:]) if warm_ep else 0\n if 0 < warm_ep and warm_ep < 1:\n warm_ep = total_ep * warm_ep\n\n # solve cycle\n cycle_ep = re.search('c\\d+', self.decay)\n cycle_ep = int(cycle_ep.group()[1:]) if cycle_ep else 0 # total num of cycles\n cycle_m = re.search('m\\d+', self.decay)\n cycle_m = float(cycle_m.group()[1:]) if cycle_m else 1 # extending len per cycle\n if cycle_m > 1:\n assert cycle_ep > 0, f'#cycle must > 0'\n cycle_ep_base = total_ep * (cycle_m - 1) / (cycle_m ** cycle_ep - 1) # solving the first cycle len - sum of geometric sequence (等比求和)\n cycle_ep = [cycle_ep_base * cycle_m ** i for i in range(cycle_ep)]\n cycle_n = len([i for i in np.cumsum(cycle_ep) if i < cur_ep]) # num of cycles\n cycle_base = np.sum(cycle_ep[:cycle_n]) # start ep of current cycle\n cycle_ep = cycle_ep[cycle_n] # current cycle length\n elif cycle_ep:\n assert total_ep % cycle_ep == 0, f'#cycle={cycle_ep} does not align with #total={total_ep}'\n cycle_ep = total_ep / cycle_ep # length of each cycle - default to total_ep (1 cycle)\n cycle_n = int(cur_ep / cycle_ep)\n cycle_base = cycle_n * cycle_ep\n else:\n cycle_ep, cycle_n, cycle_base = total_ep, 0, 0\n cur_ep = cur_ep - cycle_base\n\n # modulate max lr\n gamma = [i[1:] for i in self.decay.split('_') if i.startswith('g')]\n gamma = float(gamma[0]) if gamma else 1\n max_lr = max_lr * gamma ** cycle_n\n\n if cur_ep < warm_ep:\n # warmup stage - linear increasing\n return cur_ep / warm_ep * (max_lr - base_lr) + base_lr\n else:\n # cos decay stage\n cur_ep = cur_ep - warm_ep\n cycle_ep = cycle_ep - warm_ep\n decay = (1 + np.cos(np.pi * cur_ep / cycle_ep)) / 2 # rescaled cos weight in [0, 1]\n return base_lr + (max_lr - base_lr) * decay\n\n def _get_lr(self):\n # exponential decay (default)\n cfg = self.config\n cur_ep = self.cur_ep\n base_lr = self.clip_min if self.clip_min > 0 else 1e-5\n\n warm_ep = re.search('w\\d+', self.decay)\n warm_ep = float(warm_ep.group()[1:]) if warm_ep else 0\n\n if cur_ep < warm_ep:\n # warmup stage - linear increasing\n return cur_ep / warm_ep * (self.start_lr - base_lr) + base_lr\n\n # normal decay\n cur_ep = cur_ep - warm_ep\n if cfg.decay_step:\n times = self.cur_step // cfg.decay_step if isinstance(cfg.decay_step, int) else (np.array(cfg.decay_step) <= self.cur_step).sum()\n else:\n decay_epoch = cfg.decay_epoch if cfg.decay_epoch else 1 # decay per epoch by default\n if isinstance(decay_epoch, (list, tuple)):\n assert all(i >= 1 for i in decay_epoch), f'need to specify as real epoch, not {decay_epoch}'\n times = cur_ep // decay_epoch if isinstance(decay_epoch, 
int) else (np.array(decay_epoch) <= cur_ep).sum()\n\n cum_decay = (cfg.decay_rate ** times) if type(cfg.decay_rate) in [int, float] else np.prod(cfg.decay_rate[:times]) # np.prod([]) = 1.0\n cur_lr = self.start_lr * cum_decay\n return cur_lr\n\n def to_list(self, max_epoch=None):\n lrs = []\n max_epoch = max_epoch if max_epoch is not None else self.config.max_epoch\n for i in range(max_epoch):\n self.cur_ep = i\n lrs.append(self._get_lr())\n self.learning_rate = lrs[-1]\n self.reset()\n return lrs\n\n def step(self, epoch, step):\n self.cur_ep += epoch\n self.cur_step += step\n cur_lr = max(self._get_lr(), self.clip_min)\n self.learning_rate = cur_lr\n return cur_lr\n\n def to_plot(self, max_epoch=None):\n lrs = []\n max_epoch = max_epoch if max_epoch is not None else self.config.max_epoch\n for i in range(max_epoch):\n self.cur_ep = i\n lrs.append(self._get_lr())\n self.learning_rate = lrs[-1]\n self.reset()\n import matplotlib.pyplot as plt\n plt.plot(lrs)\n plt.show()\n return " }, { "identifier": "AverageMeter", "path": "utils/metrics.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n \n @property\n def avg(self):\n return self.sum / self.count" }, { "identifier": "GraphBuilder", "path": "utils/tf_graph_builder.py", "snippet": "class GraphBuilder(object):\n\n def __init__(self, config, graph=None, verbose=True):\n \"\"\"\n get the full compute graph including dataset, model inference, loss, optimizer, lr scheduler and required ops\n \"\"\"\n\n if graph is not None: # if graph specified\n with graph.as_default():\n return self.__init__(config, None, verbose)\n\n if isinstance(config.rand_seed, int): # set seed\n tf.set_random_seed(config.rand_seed)\n np.random.seed(config.rand_seed)\n if verbose:\n print(f'==> np random seed = {np.random.get_state()[1][0]}')\n\n # model & dataset fn\n self.get_dataset = getattr(datasets, f'{config.dataset}Dataset') # datasets.[name]Dataset\n self.get_model = models.get_model\n # if config.distribute == 'tf_device': # full compute graph (handle devices & platforms)\n # self.build = self.build_devices\n # else:\n # raise NotImplementedError(f'not supported type of distributing graphs: config.distribute={config.distribute}')\n\n # Get dataset\n if verbose:\n print('==> Preparing datasets...')\n dataset = self.get_dataset(config, verbose)\n dataset.initialize(verbose)\n if verbose:\n print('==> setting dataset info:')\n print_dict(dataset.info, prefix='\\t')\n print_mem('>>> dataset built')\n config.update(dataset.info)\n\n # placeholder\n is_training = tf.placeholder(tf.bool, shape=())\n learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')\n # learning_rate = tf.get_variable('learning_rate', [], initializer=tf.constant_initializer(float('nan')), trainable=False)\n\n # # build model\n # grads, total_loss_dict, total_result_dict, model = self.build(dataset, is_training, config, verbose=verbose)\n\n # -------------------------------------------\n # Get model and loss on multiple GPU devices\n # -------------------------------------------\n # Allocating variables on CPU first will greatly accelerate multi-gpu training.\n # Ref: https://github.com/kuza55/keras-extras/issues/21\n flat_inputs = dataset.flat_inputs\n if config.cpu_variables:\n self.get_model(flat_inputs[0], is_training, 
config=config, verbose=verbose)\n tower_grads = []\n total_losses = []\n total_result = []\n for igpu in range(config.gpu_num):\n with tf.variable_scope(tf.get_variable_scope(), reuse=True if config.cpu_variables else tf.AUTO_REUSE):\n name_scope = f'gpu_{igpu}' if config.cpu_variables or igpu > 0 else ''\n verbose = not bool(name_scope)\n with tf.device(f'/gpu:{igpu}'), tf.name_scope(name_scope) as scope:\n flat_inputs_i = flat_inputs[igpu]\n model = self.get_model(flat_inputs_i, is_training, config=config, scope=scope, verbose=verbose) # inference model\n\n # collect per-gpu info\n result_dict = model.get_result() # inference result\n total_result.append(result_dict)\n\n loss_dict = model.get_loss() # loss\n total_losses.append(loss_dict)\n\n var_list = tf.trainable_variables() # vars & grads\n var_list = self.collect_vars(var_list, include_k=config.vars_train, except_k=config.vars_freeze)\n grads = tf.gradients(loss_dict['loss'], var_list, colocate_gradients_with_ops=config.colocate_gradients_with_ops) # normally, should NOT co-locate\n grads = list(zip(grads, var_list))\n tower_grads.append(grads)\n total_inputs = dict_list(flat_inputs)\n total_result = dict_list(total_result)\n total_losses = dict_list(total_losses)\n\n # average losses from multiple GPUs\n with tf.variable_scope('losses'):\n total_losses = {k: tf.reduce_mean(v, name=k) if len(v) > 1 else v[0] for k, v in total_losses.items()}\n\n # average grad\n with tf.variable_scope('gradients'):\n # [(gradient, variable), ...] - gradient averaged over gpu towers (if >1)\n grads = average_gradients(tower_grads, grad_norm=config.grad_norm, raise_on_none=config.grad_raise_none, grad_reduce=config.grad_reduce)\n\n # setup optimizer\n with tf.variable_scope('optimizer'):\n if config.optimizer == 'sgd':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=config.momentum)\n elif config.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n elif config.optimizer == 'adamW':\n from utils.AdamWOptimizer import AdamWeightDecayOptimizer\n optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=config.weight_decay, exclude_from_weight_decay=[\"bias\"])\n\n # if config.mixed_precision:\n # optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)\n\n # momentume as update ops\n update_ops = self.get_momentum_update(model, config, total_inputs, total_result)\n for ops in update_ops: # add to collection\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ops)\n\n # train op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(grads)\n # train_op = optimizer.apply_gradients(grads)\n # train_op = tf.group([train_op, update_ops])\n\n # saver\n save_vars = None\n if config.save_compact:\n save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')\n if isinstance(config.save_compact, bool):\n pass\n elif isinstance(config.save_compact, str) and config.save_compact == 'trained':\n vars_grads = {v: g for g, v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')}\n save_vars = [v for v in save_vars if v in vars_grads and vars_grads[v] is not None] # save only trained\n else:\n raise ValueError(f'not support save_compact={config.save_compact}')\n saver = tf.train.Saver(save_vars, max_to_keep=int(config.max_to_keep))\n\n # summary\n with tf.variable_scope('summary'):\n if config.summary and isinstance(config.summary, str):\n inputs = model.inputs\n if 
'summary' not in inputs:\n inputs['summary'] = defaultdict(lambda: [])\n if config.summary == 'loss':\n inputs['summary']['per_step'] += [tf.summary.scalar(k, v) for k, v in total_losses.items()]\n # log grads - debug use\n # inputs = model.inputs\n # inputs['summary'] = defaultdict(lambda: [])\n # from models.utils import tf_Print\n # for i, (g, v) in enumerate(grads):\n # if config.summary:\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/v', v)]\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/g', g)]\n # if v.name in [\n # 'model/resnet_scene_segmentation_head/up_conv3/weights:0',\n # 'model/resnet_scene_segmentation_head/segmentation_head/weights:0',\n # ]:\n # print(f'print grad - {v.name}')\n # g = tf_Print(g, [f'grads - {v.name}', g])\n # grads[i] = (g, v)\n # input('\\nprint above grads')\n # summary - merge\n summary_dict = {} # {level : merged op}\n if config.summary:\n sum_levels = ['per_step', 'per_log', 'per_epoch']\n summary_ops = model.inputs['summary'] if 'summary' in model.inputs else {k: [] for k in sum_levels}\n assert all([k in sum_levels for k in summary_ops]), f'undesired keys in summary ops: {summary_ops.keys()}'\n for i in range(len(sum_levels)):\n lv = sum_levels[-i - 1]\n ops = sum([summary_ops[k] for k in sum_levels[:len(sum_levels)-i]], [])\n summary_dict[lv] = tf.summary.merge(ops) if len(ops) > 0 else tf.no_op()\n\n # Create a session\n cProto = tf.ConfigProto()\n if config.gpu_allow_growth:\n cProto.gpu_options.allow_growth = True\n if config.debug_single:\n cProto.device_count['CPU'] = 1\n # config.intra_op_parallelism_threads = config.inter_op_parallelism_threads = psutil.cpu_count(logical=False) # set to num of physical (default to logical) cpu cores\n cProto.allow_soft_placement = bool(config.allow_soft_placement) or not bool(config.gpu_devices) # if specified or cpu-only\n cProto.log_device_placement = False\n sess = tf.Session(config=cProto)\n\n ops = {\n 'train_init_op': dataset.train_init_op,\n 'val_init_op': dataset.val_init_op,\n 'test_init_op': dataset.test_init_op,\n\n 'train_op': train_op,\n 'is_training': is_training,\n 'learning_rate': learning_rate,\n\n 'inputs': dict(total_inputs),\n 'loss_dict': dict(total_losses),\n 'result_dict': dict(total_result),\n 'summary_dict': dict(summary_dict),\n }\n if verbose:\n print_mem('>>> model built')\n print('\\n -------- inputs {')\n print_dict(model.inputs, prefix='\\t')\n print('} --------- inputs')\n print('\\n -------- loss_dict {')\n print_dict(total_losses, prefix='\\t')\n print('} --------- loss_dict')\n print('\\n -------- result_dict {')\n print_dict(total_result, prefix='\\t')\n print('} --------- result_dict')\n\n self.ops = ops\n self.sess = sess\n self.grads = grads\n self.saver = saver\n\n self.model = model\n self.dataset = dataset\n\n # -------------------------------------------\n # Other utils & interfaces\n # -------------------------------------------\n\n def collect_vars(self, var_list, include_k=[], except_k=[], match='search'):\n # collect specified vars - default to all vars\n var_collect = []\n match_func = getattr(re, match)\n include_k = [include_k] if include_k and isinstance(include_k, str) else include_k\n except_k = [include_k] if except_k and isinstance(except_k, str) else except_k\n for v in var_list:\n if include_k and not any(match_func(k, v.name) for k in include_k):\n continue\n if except_k and any(match_func(k, v.name) for k in except_k):\n continue\n var_collect.append(v)\n return var_collect\n\n def 
get_momentum_update(self, model, config, total_inputs, total_result):\n # collect update ops for momentum update\n update_ops = []\n\n # update ops - momentum dict\n # NOTE - can be done in per-head fashion\n # => check only sepcial 'momentum_update_stage'\n for head_n, head_d in total_result.items():\n if 'momentum_dict' not in head_d or 'momentum_dict' not in total_inputs: continue\n if head_n not in total_inputs['momentum_dict']:\n raise KeyError(f'building momentum cycle for head {head_n}: missing tensor for momentum dict')\n head_cfg = model.head_dict['config'][head_n]\n\n # per-device input/output\n mom_in = total_inputs['momentum_dict'][head_n] # {k : [v = tensor]}, with inputs['momentum_dict'] = {head_n: {k : placeholder/vars}}\n mom_out = head_d['momentum_dict'] # {k: [v = tensor]}\n for k, v_out in mom_out.items():\n v_in = mom_in[k]\n\n # collect for update\n mom_avg = head_cfg.momentum_update\n mom_avg = float(mom_avg) if isinstance(mom_avg, (str, int)) else mom_avg # can be variable\n with tf.variable_scope(f'mom_dict_update/{head_n}/{k}'):\n if head_cfg.momentum_update_stage == 'glb_avg':\n # average over devices\n v_out = tf.reduce_mean(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n elif head_cfg.momentum_update_stage == 'glb_sum':\n # sum over devices\n v_out = tf.reduce_sum(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n # create update ops\n for igpu in range(config.gpu_num): # assign to each device input\n with tf.variable_scope(f'gpu_{igpu}/mom_dict_update/{head_n}/{k}', reuse=True):\n update_ops += [tf.assign(v_in[igpu], v_out[igpu])]\n\n return update_ops\n\n\n\n def restore(self, *args, **kwargs):\n argspec = inspect.getfullargspec(restore)\n kwargs.update(zip(argspec.args, args))\n kw_self = {'session': self.sess} # , 'saver': self.saver\n for k, v in kw_self.items():\n if k not in kwargs:\n kwargs[k] = v\n return restore(**kwargs)\n\n def close(self):\n self.sess.close()\n tf.reset_default_graph()" } ]
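The average_gradients snippet in the context list above reduces the per-variable gradients collected from several GPU towers into one averaged update, optionally clipping each gradient by norm first. The following is a minimal, framework-free NumPy sketch of that reduction; the tower_grads layout of (gradient, variable-name) pairs and the grad_norm threshold are illustrative assumptions, not the repository's TensorFlow implementation.

import numpy as np

def clip_by_norm(g, max_norm):
    # Scale g down if its L2 norm exceeds max_norm (mirrors the intent of tf.clip_by_norm).
    norm = np.linalg.norm(g)
    return g * (max_norm / norm) if norm > max_norm else g

def average_tower_gradients(tower_grads, grad_norm=None):
    """Average gradients per variable across towers.

    tower_grads: [[(grad, var_name), ...] for each tower]  # hypothetical layout
    Returns: [(averaged_grad, var_name), ...]
    """
    averaged = []
    for grad_and_vars in zip(*tower_grads):                # group the same variable across towers
        grads = [g for g, _ in grad_and_vars if g is not None]
        if grad_norm:                                      # optional per-gradient clipping
            grads = [clip_by_norm(g, grad_norm) for g in grads]
        mean_grad = np.mean(np.stack(grads, axis=0), axis=0) if grads else None
        averaged.append((mean_grad, grad_and_vars[0][1]))  # keep the first tower's variable name
    return averaged

# Usage: two towers, two variables
towers = [
    [(np.array([1.0, 2.0]), "w"), (np.array([0.5]), "b")],
    [(np.array([3.0, 4.0]), "w"), (np.array([1.5]), "b")],
]
print(average_tower_gradients(towers, grad_norm=10.0))
# [(array([2., 3.]), 'w'), (array([1.]), 'b')]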
import os, re, gc, sys, time, pickle, psutil, subprocess import numpy as np import tensorflow as tf from config import log_config from utils.logger import print_dict, print_table from utils.ply import read_ply, write_ply from utils.tester import ModelTester from utils.average_gradients import average_gradients from utils.AdamWOptimizer import AdamWeightDecayOptimizer from utils.logger import setup_logger from utils.scheduler import StepScheduler, LrScheduler from utils.metrics import AverageMeter from utils.tf_graph_builder import GraphBuilder
19,683
batch_idx = 0 end = time.time() while True: try: rst = sess.run(train_ops, feed_dict=feed_dict) if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx # Debug methods # ------------------------------------------------------------------------------------------------------------------ def show_memory_usage(self, batch_to_feed): for l in range(self.config.num_layers): neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape) dist_size = neighb_size + [self.config.num_kernel_points, 3] dist_memory = np.prod(dist_size) * 4 * 1e-9 in_feature_size = neighb_size + [self.config.first_features_dim * 2**l] in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9 out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)] out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9 print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l, dist_memory, in_feature_memory, out_feature_memory)) print('************************************') def train_one_epoch_debug(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} inputs = self.model.inputs inputs_flat = {k: v for k, v in inputs.items() if not isinstance(v, (list, dict))} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict'], 'inputs': inputs_flat, 'result_dict': ops['result_dict']} assert_ops = inputs['assert_ops'] if 'assert_ops' in inputs and len(inputs['assert_ops']) > 0 else [] feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr} sess.run(ops['train_init_op']) if config.debug_grads: assert g is not None # [(g, v), ...] 
train_ops['grads'] = g.grads batch_idx = 0 end = time.time() while True: try: with tf.control_dependencies(assert_ops): rst = sess.run(train_ops, feed_dict=feed_dict) # NaN appears if config.debug_grads: self.debug_grads_nan(sess, inputs, train_ops, rst) if any([np.isnan(v) for v in rst['loss_dict'].values()]): self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered !!!') if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx def debug_grads_nan(self, sess, inputs, ops, rst): grads = ops['grads'] grads_v = rst['grads'] nan_grads = [(g, v, g_val, v_val) for (g, v), (g_val, v_val) in zip(grads, grads_v) if np.isnan(g_val).any() or np.isnan(v_val).any()] if not nan_grads: return lines = [] for g, v, g_val, v_val in nan_grads: g_nan = 100 * np.sum(np.isnan(g_val)) / np.prod(g_val.shape) v_nan = 100 * np.sum(np.isnan(v_val)) / np.prod(v_val.shape) lines.append([v.name, g, '-', v_val.shape, f'/ {v_nan:.1f}', 'val nan', g_val.shape, f'/ {g_nan:.1f}', 'grad nan']) print_table(lines) self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered in grads checking !!!') return def debug_nan(self, sess, inputs, result_dict, loss_dict): """ NaN happened, find where """ print('\n\n------------------------ NaN DEBUG ------------------------\n') print('loss_dict :') print('*******************\n')
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t') print('} --------- printing grads') # all ops in graph print('\n --------- all ops {') re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*'] # '.*/batch_normalization/.*', '.*/bias:.*' # skipping for n in tf.get_default_graph().as_graph_def().node: if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]): continue print('\t', n.name) print('} --------- all ops') # model params all_params_size = sum([np.prod(v.shape) for _, v in grads]) # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads]) # all_params_size = sess.run(all_params_size) print(f'==> Model have {all_params_size} total Params', flush=True) # init sess sess.run(tf.global_variables_initializer()) if self.config.model_path: except_list = [f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*'] if not self.config.continue_training else [] g.restore(sess, self.config.model_path, except_list=except_list) print(f'Model restored -- {self.config.model_path}') # running voting - used throughout the training process (accumulated voting) validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes) # train func if config.debug_nan: self.train_one_epoch = self.train_one_epoch_debug # train metric_best = None # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0] lr_scheduler = LrScheduler(config) snap_path = os.path.join(config.saving_path, config.snap_dir, config.snap_prefix) for epoch in range(1, config.max_epoch + 1): print(f'\n****EPOCH {epoch}****') lr = lr_scheduler.learning_rate tic1 = time.time() step = self.train_one_epoch(sess, ops, epoch, lr, g=g) 
tic2 = time.time() print(f'total time: {(tic2 - tic1)/60:.1f}min, learning rate = {lr:.7f}', flush=True) if epoch % config.val_freq == 0: metric = self.tester.val_running_vote(sess, ops, dataset, model, validation_probs) # running voting if metric_best is None or metric > metric_best: # keep the best val metric_best = metric saver.save(sess, snap_path + '-best') print('best saved') # if config.save_best: # saver.save(sess, snap_path + '-best') # if config.save_best == 'center': # epoch_start = max(epoch // config.save_freq - config.max_to_keep // 2, 1) # save_snap = [i * config.save_freq for i in range(epoch_start, epoch_start + config.max_to_keep + 1)] # save_snap = [i for i in save_snap if i != epoch] # if epoch in save_snap: if config.save_freq and epoch % config.save_freq == 0: saver.save(sess, snap_path, global_step=epoch) lr_scheduler.step(epoch=1, step=step) # val & save last model if missed if epoch % config.val_freq != 0: self.tester.val_running_vote(sess, ops, dataset, model, validation_probs) if config.save_freq and epoch % config.save_freq != 0: saver.save(sess, snap_path, global_step=epoch) print('\nfinished\n', flush=True) return def train_one_epoch(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict']} feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr} sess.run(ops['train_init_op']) batch_idx = 0 end = time.time() while True: try: rst = sess.run(train_ops, feed_dict=feed_dict) if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx # Debug methods # ------------------------------------------------------------------------------------------------------------------ def show_memory_usage(self, batch_to_feed): for l in range(self.config.num_layers): neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape) dist_size = neighb_size + [self.config.num_kernel_points, 3] dist_memory = np.prod(dist_size) * 4 * 1e-9 in_feature_size = neighb_size + [self.config.first_features_dim * 2**l] in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9 out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)] out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9 print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l, dist_memory, in_feature_memory, out_feature_memory)) print('************************************') def train_one_epoch_debug(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} inputs = self.model.inputs inputs_flat = {k: v for k, v in inputs.items() if not isinstance(v, (list, dict))} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict'], 'inputs': inputs_flat, 'result_dict': ops['result_dict']} assert_ops = inputs['assert_ops'] if 'assert_ops' in inputs and len(inputs['assert_ops']) > 0 else [] feed_dict = {ops['is_training']: is_training, 
ops['learning_rate']: lr} sess.run(ops['train_init_op']) if config.debug_grads: assert g is not None # [(g, v), ...] train_ops['grads'] = g.grads batch_idx = 0 end = time.time() while True: try: with tf.control_dependencies(assert_ops): rst = sess.run(train_ops, feed_dict=feed_dict) # NaN appears if config.debug_grads: self.debug_grads_nan(sess, inputs, train_ops, rst) if any([np.isnan(v) for v in rst['loss_dict'].values()]): self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered !!!') if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx def debug_grads_nan(self, sess, inputs, ops, rst): grads = ops['grads'] grads_v = rst['grads'] nan_grads = [(g, v, g_val, v_val) for (g, v), (g_val, v_val) in zip(grads, grads_v) if np.isnan(g_val).any() or np.isnan(v_val).any()] if not nan_grads: return lines = [] for g, v, g_val, v_val in nan_grads: g_nan = 100 * np.sum(np.isnan(g_val)) / np.prod(g_val.shape) v_nan = 100 * np.sum(np.isnan(v_val)) / np.prod(v_val.shape) lines.append([v.name, g, '-', v_val.shape, f'/ {v_nan:.1f}', 'val nan', g_val.shape, f'/ {g_nan:.1f}', 'grad nan']) print_table(lines) self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered in grads checking !!!') return def debug_nan(self, sess, inputs, result_dict, loss_dict): """ NaN happened, find where """ print('\n\n------------------------ NaN DEBUG ------------------------\n') print('loss_dict :') print('*******************\n')
print_dict(loss_dict)
1
2023-10-13 08:03:07+00:00
24k
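The LrScheduler snippet in the record above parses decay strings such as cos_w10 into a cosine-annealing schedule with a linear warmup. Below is a minimal sketch of that schedule, assuming a single cycle, epoch-based stepping, and an illustrative function name and argument set not taken from the repository.

import math

def cosine_lr(epoch, max_epoch, max_lr, base_lr=1e-5, warm_ep=10):
    """Linear warmup for warm_ep epochs, then cosine decay from max_lr down to base_lr."""
    if epoch < warm_ep:
        return base_lr + (max_lr - base_lr) * epoch / warm_ep
    t = (epoch - warm_ep) / (max_epoch - warm_ep)   # progress through the decay phase, in [0, 1]
    weight = (1 + math.cos(math.pi * t)) / 2        # rescaled cosine weight, goes from 1 to 0
    return base_lr + (max_lr - base_lr) * weight

# Usage: peak lr 1e-2 over 100 epochs with a 10-epoch warmup
print([round(cosine_lr(e, 100, 1e-2), 5) for e in (0, 5, 10, 55, 99)])

The snippet in the record extends this same idea with multiple cycles (c), growing cycle lengths (m) and a per-cycle decay of the peak learning rate (g).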
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/urllib3/connectionpool.py
[ { "identifier": "_TYPE_BODY", "path": "dist/py/Python38/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "HTTPHeaderDict", "path": "dist/py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered in Python 3.7+\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n 
any existing header value with a comma. If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "RequestMethods", "path": "dist/py/Python38/site-packages/urllib3/_request_methods.py", "snippet": "class RequestMethods:\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as 
:class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:\n self.headers = headers or {}\n\n def urlopen(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **kw: typing.Any,\n ) -> BaseHTTPResponse: # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n json: typing.Any | None = None,\n **urlopen_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if json is not None and body is not None:\n raise TypeError(\n \"request got values for both 'body' and 'json' parameters which are mutually exclusive\"\n )\n\n if json is not None:\n if headers is None:\n headers = self.headers.copy() # type: ignore\n if not (\"content-type\" in map(str.lower, headers.keys())):\n headers[\"Content-Type\"] = \"application/json\" # type: ignore\n\n body = _json.dumps(json, separators=(\",\", \":\"), ensure_ascii=False).encode(\n \"utf-8\"\n )\n\n if body is not None:\n urlopen_kw[\"body\"] = body\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method,\n url,\n fields=fields, # type: ignore[arg-type]\n headers=headers,\n **urlopen_kw,\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(\n self,\n method: str,\n url: str,\n fields: _TYPE_ENCODE_URL_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. 
This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method: str,\n url: str,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. 
The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": HTTPHeaderDict(headers)}\n body: bytes | str\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields), # type: ignore[arg-type]\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"].setdefault(\"Content-Type\", content_type)\n\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "BaseSSLError", "path": "dist/py/Python38/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, *values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n 
ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "port_by_scheme", "path": "dist/py/Python38/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, *values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = 
None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "ClosedPoolError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ClosedPoolError(PoolError):\n \"\"\"Raised when a request enters a pool after the pool has been closed.\"\"\"" }, { "identifier": "EmptyPoolError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class EmptyPoolError(PoolError):\n \"\"\"Raised when a pool runs out of connections and no more are allowed.\"\"\"" }, { "identifier": "FullPoolError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class FullPoolError(PoolError):\n \"\"\"Raised when we try to add a connection to a full pool in blocking mode.\"\"\"" }, { "identifier": "HostChangedError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class HostChangedError(RequestError):\n \"\"\"Raised when an existing pool gets a request for a foreign host.\"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, retries: Retry | int = 3\n ) -> None:\n message = f\"Tried to open a foreign host with url: {url}\"\n 
super().__init__(pool, url, message)\n self.retries = retries" }, { "identifier": "InsecureRequestWarning", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class InsecureRequestWarning(SecurityWarning):\n \"\"\"Warned when making an unverified HTTPS request.\"\"\"" }, { "identifier": "LocationValueError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"" }, { "identifier": "MaxRetryError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param str url: The requested Url\n :param reason: The underlying error\n :type reason: :class:`Exception`\n\n \"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, reason: Exception | None = None\n ) -> None:\n self.reason = reason\n\n message = f\"Max retries exceeded with url: {url} (Caused by {reason!r})\"\n\n super().__init__(pool, url, message)" }, { "identifier": "NewConnectionError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProtocolError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ProtocolError(HTTPError):\n \"\"\"Raised when something unexpected happens mid-request/response.\"\"\"" }, { "identifier": "ProxyError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "ReadTimeoutError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ReadTimeoutError(TimeoutError, RequestError):\n \"\"\"Raised when a socket timeout occurs while receiving data from a server\"\"\"" }, { "identifier": "SSLError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class SSLError(HTTPError):\n \"\"\"Raised when SSL certificate fails in an HTTPS connection.\"\"\"" }, { "identifier": "TimeoutError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class TimeoutError(HTTPError):\n \"\"\"Raised when a socket timeout error occurs.\n\n Catching this error will catch both :exc:`ReadTimeoutErrors\n <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.\n \"\"\"" }, { "identifier": "BaseHTTPResponse", "path": "dist/py/Python38/site-packages/urllib3/response.py", "snippet": "class BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not 
None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,\n status: int,\n version: int,\n reason: str | None,\n decode_content: bool,\n request_url: str | None,\n retries: Retry | None = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self._has_decoded_content = False\n self._request_url: str | None = request_url\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: ContentDecoder | None = None\n\n def get_redirect_location(self) -> str | None | Literal[False]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> typing.Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> str | None:\n raise NotImplementedError()\n\n @url.setter\n def url(self, url: str | None) -> None:\n raise NotImplementedError()\n\n @property\n def connection(self) -> HTTPConnection | None:\n raise NotImplementedError()\n\n @property\n def retries(self) -> Retry | None:\n return self._retries\n\n @retries.setter\n def retries(self, retries: Retry | None) -> None:\n # Override the request_url if retries has a redirect location.\n if retries is not None and retries.history:\n self.url = retries.history[-1].redirect_location\n self._retries = retries\n\n def stream(\n self, amt: int | None = 2**16, decode_content: bool | None = None\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = 
self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: bool | None, flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n if self._has_decoded_content:\n raise RuntimeError(\n \"Calling read(decode_content=False) is not supported after \"\n \"read(decode_content=True) was called.\"\n )\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n self._has_decoded_content = True\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> HTTPHeaderDict:\n warnings.warn(\n \"HTTPResponse.getheaders() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers\n\n def getheader(self, name: str, default: str | None = None) -> str | None:\n warnings.warn(\n \"HTTPResponse.getheader() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> str | None:\n return self.url" }, { "identifier": "is_connection_dropped", "path": "dist/py/Python38/site-packages/urllib3/util/connection.py", "snippet": "def is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\n \"\"\"\n Returns True if the connection is dropped and should be closed.\n :param conn: :class:`urllib3.connection.HTTPConnection` object.\n \"\"\"\n return not conn.is_connected" }, { "identifier": "connection_requires_http_tunnel", "path": "dist/py/Python38/site-packages/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n destination_scheme: str | None = None,\n) -> bool:\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. 
(i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "_TYPE_BODY_POSITION", "path": "dist/py/Python38/site-packages/urllib3/util/request.py", "snippet": "_TYPE_BODY_POSITION = typing.Union[int, _TYPE_FAILEDTELL]" }, { "identifier": "set_file_position", "path": "dist/py/Python38/site-packages/urllib3/util/request.py", "snippet": "def set_file_position(\n body: typing.Any, pos: _TYPE_BODY_POSITION | None\n) -> _TYPE_BODY_POSITION | None:\n \"\"\"\n If a position is provided, move file to that point.\n Otherwise, we'll attempt to record a position for future use.\n \"\"\"\n if pos is not None:\n rewind_body(body, pos)\n elif getattr(body, \"tell\", None) is not None:\n try:\n pos = body.tell()\n except OSError:\n # This differentiates from None, allowing us to catch\n # a failed `tell()` later when trying to rewind the body.\n pos = _FAILEDTELL\n\n return pos" }, { "identifier": "Retry", "path": "dist/py/Python38/site-packages/urllib3/util/retry.py", "snippet": "class Retry:\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool:\n\n .. code-block:: python\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request(\"GET\", \"https://example.com/\")\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=Retry(10))\n\n Retries can be disabled by passing ``False``:\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. 
Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param Collection allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``None`` value to retry on any verb.\n\n :param Collection status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of previous retries}))\n\n seconds. If `backoff_jitter` is non-zero, this sleep is extended by::\n\n random.uniform(0, {backoff jitter})\n\n seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will\n sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever\n be longer than `backoff_max`.\n\n By default, backoff is disabled (factor set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. 
Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param Collection remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Default maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n # Backward compatibility; assigned outside of the class.\n DEFAULT: typing.ClassVar[Retry]\n\n def __init__(\n self,\n total: bool | int | None = 10,\n connect: int | None = None,\n read: int | None = None,\n redirect: bool | int | None = None,\n status: int | None = None,\n other: int | None = None,\n allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,\n status_forcelist: typing.Collection[int] | None = None,\n backoff_factor: float = 0,\n backoff_max: float = DEFAULT_BACKOFF_MAX,\n raise_on_redirect: bool = True,\n raise_on_status: bool = True,\n history: tuple[RequestHistory, ...] | None = None,\n respect_retry_after_header: bool = True,\n remove_headers_on_redirect: typing.Collection[\n str\n ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,\n backoff_jitter: float = 0.0,\n ) -> None:\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.backoff_max = backoff_max\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or ()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n h.lower() for h in remove_headers_on_redirect\n )\n self.backoff_jitter = backoff_jitter\n\n def new(self, **kw: typing.Any) -> Retry:\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n allowed_methods=self.allowed_methods,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n backoff_max=self.backoff_max,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n backoff_jitter=self.backoff_jitter,\n )\n\n params.update(kw)\n return type(self)(**params) # type: ignore[arg-type]\n\n @classmethod\n def from_int(\n cls,\n retries: Retry | bool | int | None,\n redirect: bool | int | None = True,\n default: Retry | bool | int | None = None,\n ) -> Retry:\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and 
None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self) -> float:\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n if self.backoff_jitter != 0.0:\n backoff_value += random.random() * self.backoff_jitter\n return float(max(0, min(self.backoff_max, backoff_value)))\n\n def parse_retry_after(self, retry_after: str) -> float:\n seconds: float\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(f\"Invalid Retry-After header: {retry_after}\")\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n seconds = max(seconds, 0)\n\n return seconds\n\n def get_retry_after(self, response: BaseHTTPResponse) -> float | None:\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self) -> None:\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response: BaseHTTPResponse | None = None) -> None:\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err: Exception) -> bool:\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err: Exception) -> bool:\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method: str) -> bool:\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n if self.allowed_methods and method.upper() not in self.allowed_methods:\n return False\n return True\n\n def is_retry(\n self, method: str, status_code: int, has_retry_after: bool = False\n ) -> bool:\n \"\"\"Is this method/status code retryable? 
(Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return bool(\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self) -> bool:\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = [\n x\n for x in (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n if x\n ]\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method: str | None = None,\n url: str | None = None,\n response: BaseHTTPResponse | None = None,\n error: Exception | None = None,\n _pool: ConnectionPool | None = None,\n _stacktrace: TracebackType | None = None,\n ) -> Retry:\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.BaseHTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n elif error and self._is_read_error(error):\n # Read retry?\n if read is False or method is None or not self._is_method_retryable(method):\n raise reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n response_redirect_location = response.get_redirect_location()\n if response_redirect_location:\n redirect_location = response_redirect_location\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n reason = error or ResponseError(cause)\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, 
new_retry)\n\n return new_retry\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(total={self.total}, connect={self.connect}, \"\n f\"read={self.read}, redirect={self.redirect}, status={self.status})\"\n )" }, { "identifier": "CertificateError", "path": "dist/py/Python38/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "_DEFAULT_TIMEOUT", "path": "dist/py/Python38/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_DEFAULT", "path": "dist/py/Python38/site-packages/urllib3/util/timeout.py", "snippet": "class _TYPE_DEFAULT(Enum):\n # This value should never be passed to socket.settimeout() so for safety we use a -1.\n # socket.settimout() raises a ValueError for negative values.\n token = -1" }, { "identifier": "Timeout", "path": "dist/py/Python38/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. 
For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. 
This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "Url", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" }, { "identifier": "_encode_target", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "def _encode_target(target: str) -> str:\n \"\"\"Percent-encodes a request target so that there are no invalid characters\n\n Pre-condition for this function is that 'target' must start with '/'.\n If that is the case then _TARGET_RE will always produce a match.\n \"\"\"\n match = _TARGET_RE.match(target)\n if not match: # Defensive:\n raise LocationParseError(f\"{target!r} is not a valid request URI\")\n\n path, query = match.groups()\n encoded_target = _encode_invalid_chars(path, _PATH_CHARS)\n if query is not None:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n encoded_target += \"?\" + query\n return encoded_target" }, { "identifier": "_normalize_host", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "@typing.overload\ndef _normalize_host(host: None, scheme: str | None) -> None:\n ..." }, { "identifier": "parse_url", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" }, { "identifier": "to_str", "path": "dist/py/Python38/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" } ]
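Several of the context snippets above (Retry, Timeout, Url, parse_url) describe their own usage in their docstrings. Purely as an illustrative sketch drawn from those docstrings, and not part of the dataset record itself, the Retry and Timeout helpers combine with a PoolManager roughly like this (the target URL is a placeholder):

.. code-block:: python

    import urllib3
    from urllib3.util.retry import Retry
    from urllib3.util.timeout import Timeout

    # Allow up to 3 retries with exponential backoff, force retries on 429/503,
    # and bound each attempt with a 2 s connect / 7 s read timeout.
    retries = Retry(total=3, backoff_factor=0.1, status_forcelist=[429, 503])
    timeout = Timeout(connect=2.0, read=7.0)

    http = urllib3.PoolManager(retries=retries, timeout=timeout)
    resp = http.request("GET", "https://example.com/")
    print(resp.status)

Both settings act as pool-wide defaults and can be overridden per request, as the Retry and Timeout docstrings quoted above note.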
import errno import logging import queue import sys import typing import warnings import weakref import ssl from socket import timeout as SocketTimeout from types import TracebackType from ._base_connection import _TYPE_BODY from ._collections import HTTPHeaderDict from ._request_methods import RequestMethods from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, ProxyConfig, _wrap_proxy_error, ) from .connection import port_by_scheme as port_by_scheme from .exceptions import ( ClosedPoolError, EmptyPoolError, FullPoolError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .response import BaseHTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.request import _TYPE_BODY_POSITION, set_file_position from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import parse_url from .util.util import to_str from typing_extensions import Literal from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
21,463
""" Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None, _proxy: Url | None = None, _proxy_headers: typing.Mapping[str, str] | None = None,
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None, _proxy: Url | None = None, _proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
3
2023-10-11 09:08:57+00:00
24k
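For this record, cropped_code and all_code document HTTPConnectionPool's constructor (timeout, maxsize, block, headers, retries, proxy settings), and next_line gives the `_proxy_config` parameter as the expected completion. A minimal usage sketch of that constructor, assuming only the parameters described in the docstring (the host, port and values are placeholders), looks like:

.. code-block:: python

    from urllib3 import HTTPConnectionPool
    from urllib3.util.retry import Retry
    from urllib3.util.timeout import Timeout

    # One pool per host: keep up to 10 reusable connections and block when exhausted.
    pool = HTTPConnectionPool(
        "localhost",
        port=8080,
        maxsize=10,
        block=True,
        timeout=Timeout(connect=2.0, read=7.0),
        retries=Retry(connect=5, read=2, redirect=5),
    )
    response = pool.request("GET", "/")
    print(response.status)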
MTgeophysics/mtpy-v2
mtpy/modeling/modem/residual.py
[ { "identifier": "Data", "path": "mtpy/modeling/modem/data.py", "snippet": "class Data:\n \"\"\"\n Data will read and write .dat files for ModEM and convert a WS data file\n to ModEM format.\n\n ..note: :: the data is interpolated onto the given periods such that all\n stations invert for the same periods. The interpolation is\n a linear interpolation of each of the real and imaginary parts\n of the impedance tensor and induction tensor.\n See mtpy.core.mt.MT.interpolate for more details\n \n :param edi_list: list of edi files to read\n\n ====================== ====================================================\n Attributes Description\n ====================== ====================================================\n _dtype internal variable defining the data type of\n data_array\n _logger python logging object that put messages in logging\n format defined in logging configure file, see MtPyLog\n for more information\n _t_shape internal variable defining shape of tipper array in\n _dtype\n _z_shape internal variable defining shape of Z array in\n _dtype\n center_position (east, north, evel) for center point of station\n array. All stations are relative to this location\n for plotting purposes.\n comp_index_dict dictionary for index values of component of Z and T\n station_locations Stations object\n data_array numpy.ndarray (num_stations) structured to store\n data. keys are:\n * station --> station name\n * lat --> latitude in decimal degrees\n * lon --> longitude in decimal degrees\n * elev --> elevation (m)\n * rel_east -- > relative east location to\n center_position (m)\n * rel_north --> relative north location to\n center_position (m)\n * east --> UTM east (m)\n * north --> UTM north (m)\n * zone --> UTM zone\n * z --> impedance tensor array with shape\n (num_freq, 2, 2)\n * z_err --> impedance tensor error array with\n shape (num_freq, 2, 2)\n * tip --> Tipper array with shape\n (num_freq, 1, 2)\n * tipperr --> Tipper array with shape\n (num_freq, 1, 2)\n data_fn full path to data file\n data_period_list period list from all the data\n edi_list list of full paths to edi files\n error_type_tipper [ 'abs' | 'floor' ]\n *default* is 'abs'\n error_type_z [ 'egbert' | 'mean_od' | 'eigen' | 'median']\n *default* is 'egbert_floor'\n * add '_floor' to any of the above to set the\n error as an error floor, otherwise all\n components are give weighted the same\n\n * 'egbert' sets error to\n error_value_z * sqrt(abs(zxy*zyx))\n * 'mean_od' sets error to\n error_value_z * mean([Zxy, Zyx])\n (non zeros)\n * 'eigen' sets error to\n error_value_z * eigenvalues(Z[ii])\n * 'median' sets error to\n error_value_z * median([Zxx, Zxy, Zyx, Zyy])\n (non zeros)\n A 2x2 numpy array of error_type_z can be specified to\n explicitly set the error_type_z for each component.\n\n error_value_z percentage to multiply Z by to set error\n *default* is 5 for 5% of Z as error\n A 2x2 numpy array of values can be specified to\n explicitly set the error_value_z for each component.\n\n error_value_tipper absolute error between 0 and 1.\n fn_basename basename of data file. 
*default* is 'ModEM_Data.dat'\n formatting ['1' | '2'], format of the output data file, *default* is '1'\n header_strings strings for header of data file following the format\n outlined in the ModEM documentation\n inv_comp_dict dictionary of inversion components\n inv_mode inversion mode, options are: *default* is '1'\n * '1' --> for 'Full_Impedance' and\n 'Full_Vertical_Components'\n * '2' --> 'Full_Impedance'\n * '3' --> 'Off_Diagonal_Impedance' and\n 'Full_Vertical_Components'\n * '4' --> 'Off_Diagonal_Impedance'\n * '5' --> 'Full_Vertical_Components'\n * '6' --> 'Full_Interstation_TF'\n * '7' --> 'Off_Diagonal_Rho_Phase'\n\n inv_mode_dict dictionary for inversion modes\n max_num_periods maximum number of periods\n model_epsg epsg code for model projection, provide this to\n project model to non-utm coordinates. Find the epsg\n code for your projection on\n http://spatialreference.org/ref/ or google search\n epsg \"your projection\"\n model_utm_zone alternative to model_epsg, choose a utm zone to\n project all sites to (e.g. '55S')\n mt_dict dictionary of mtpy.core.mt.MT objects with keys\n being station names\n period_buffer float or int\n if specified, apply a buffer so that interpolation doesn't\n stretch too far over periods\n period_dict dictionary of period index for period_list\n period_list list of periods to invert for\n period_max maximum value of period to invert for\n period_min minimum value of period to invert for\n period_buffer buffer so that interpolation doesn't stretch too far\n over periods. Provide a float or integer factor, \n greater than which interpolation will not stretch.\n e.g. 1.5 means only interpolate to a maximum of\n 1.5 times each side of each frequency value\n rotate_angle Angle to rotate data to assuming 0 is N and E is 90\n save_path path to save data file to\n units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z\n *default* is [mV/km]/[nT]\n wave_sign_impedance [ + | - ] sign of time dependent wave.\n *default* is '+' as positive downwards.\n wave_sign_tipper [ + | - ] sign of time dependent wave.\n *default* is '+' as positive downwards.\n ====================== ====================================================\n\n\n :Example 1 --> create inversion period list: ::\n\n >>> from pathlib import Path\n >>> import mtpy.modeling.modem as modem\n >>> edi_path = Path(r\"/home/mt/edi_files\")\n >>> edi_list = list(edi_path.glob(\"*.edi\"))\n >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\\\n >>> ... 
max_num_periods=12)\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n >>> md\n \n\n :Example 2 --> set inverions period list from data: ::\n\n >>> md = modem.Data(edi_list)\n >>> #get period list from an .edi file\n >>> inv_period_list = 1./md.mt_dict[\"mt01\"].Z.freq\n >>> #invert for every third period in inv_period_list\n >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))]\n >>> md.period_list = inv_period_list\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n\n :Example 3 --> change error values: ::\n\n >>> mdr.error_type = 'floor'\n >>> mdr.error_floor = 10\n >>> mdr.error_tipper = .03\n >>> mdr.write_data_file(save_path=r\"/home/modem/inv2\")\n\n :Example 4 --> change inversion type: ::\n\n >>> mdr.inv_mode = '3'\n >>> mdr.write_data_file(save_path=r\"/home/modem/inv2\")\n\n :Example 5 --> rotate data: ::\n\n >>> md.rotation_angle = 60\n >>> md.write_data_file(save_path=r\"/home/modem/Inv1\")\n >>> # or\n >>> md.write_data_file(save_path=r\"/home/modem/Inv1\", \\\n rotation_angle=60)\n\n\n \"\"\"\n\n def __init__(self, dataframe=None, center_point=None, **kwargs):\n\n self.logger = logger\n\n self.dataframe = dataframe\n\n if center_point is None:\n self.center_point = MTLocation()\n else:\n self.center_point = center_point\n\n self.wave_sign_impedance = \"+\"\n self.wave_sign_tipper = \"+\"\n self.z_units = \"[mV/km]/[nT]\"\n self.t_units = \"\"\n self.inv_mode = \"1\"\n self.formatting = \"1\"\n self.rotation_angle = 0\n\n self.z_model_error = ModelErrors(\n error_value=5,\n error_type=\"geometric_mean\",\n floor=True,\n mode=\"impedance\",\n )\n self.t_model_error = ModelErrors(\n error_value=0.02,\n error_type=\"absolute\",\n floor=True,\n mode=\"tipper\",\n )\n\n self.fn_basename = \"ModEM_Data.dat\"\n self.save_path = Path.cwd()\n\n self.topography = True\n\n self.inv_mode_dict = {\n \"1\": [\"Full_Impedance\", \"Full_Vertical_Components\"],\n \"2\": [\"Full_Impedance\"],\n \"3\": [\"Off_Diagonal_Impedance\", \"Full_Vertical_Components\"],\n \"4\": [\"Off_Diagonal_Impedance\"],\n \"5\": [\"Full_Vertical_Components\"],\n \"6\": [\"Full_Interstation_TF\"],\n \"7\": [\"Off_Diagonal_Rho_Phase\"],\n }\n self.inv_comp_dict = {\n \"Full_Impedance\": [\"zxx\", \"zxy\", \"zyx\", \"zyy\"],\n \"Off_Diagonal_Impedance\": [\"zxy\", \"zyx\"],\n \"Full_Vertical_Components\": [\"tzx\", \"tzy\"],\n }\n\n self.header_string = \" \".join(\n [\n \"# Period(s)\",\n \"Code\",\n \"GG_Lat\",\n \"GG_Lon\",\n \"X(m)\",\n \"Y(m)\",\n \"Z(m)\",\n \"Component\",\n \"Real\",\n \"Imag\",\n \"Error\",\n ]\n )\n\n self._df_keys = [\n \"period\",\n \"station\",\n \"latitude\",\n \"longitude\",\n \"model_north\",\n \"model_east\",\n \"model_elevation\",\n \"comp\",\n \"real\",\n \"imag\",\n \"error\",\n ]\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def __str__(self):\n lines = [\"ModEM Data Object:\"]\n if self.dataframe is not None:\n lines += [\n f\"\\tNumber of impedance stations: {self.get_n_stations('impedance')}\"\n ]\n lines += [\n f\"\\tNumber of tipper stations: {self.get_n_stations('vertical')}\"\n ]\n lines += [\n f\"\\tNumber of phase tensor stations: {self.get_n_stations('phase_tensor')}\"\n ]\n lines += [f\"\\tNumber of periods: {self.n_periods}\"]\n lines += [\"\\tPeriod range (s): \"]\n lines += [f\"\\t\\tMin: {self.period.min():.5g}\"]\n lines += [f\"\\t\\tMax: {self.period.max():.5g}\"]\n lines += [f\"\\tRotation angle: {self.rotation_angle}\"]\n lines += [\"\\tData center: \"]\n lines += [\n f\"\\t\\tLatitude: 
{self.center_point.latitude:>8.4f} deg \"\n f\"\\tNorthing: {self.center_point.north:.4f} m\"\n ]\n lines += [\n f\"\\t\\tLongitude: {self.center_point.longitude:>8.4f} deg \"\n f\"\\tEasting: {self.center_point.east:.4f} m\"\n ]\n lines += [\n f\"\\t\\tDatum epsg: {self.center_point.datum_epsg}\"\n f\"\\t\\t\\tUTM epsg: {self.center_point.utm_epsg}\"\n ]\n lines += [f\"\\t\\tElevation: {self.center_point.elevation:.1f} m\"]\n\n lines += [\n f\"\\tImpedance data: {self.dataframe.zxy.mean() != 0.0}\"\n ]\n lines += [\n f\"\\tTipper data: {self.dataframe.tzx.mean() != 0.0}\"\n ]\n lines += [\n f\"\\tInversion Mode: {', '.join(self.inv_mode_dict[self.inv_mode])}\"\n ]\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def dataframe(self):\n return self._mt_dataframe.dataframe\n\n @dataframe.setter\n def dataframe(self, df):\n \"\"\"\n Set dataframe to an MTDataframe\n :param df: DESCRIPTION\n :type df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if df is None:\n self._mt_dataframe = MTDataFrame()\n\n elif isinstance(df, (pd.DataFrame, MTDataFrame, np.ndarray)):\n self._mt_dataframe = MTDataFrame(df)\n\n else:\n raise TypeError(\n f\"Input must be a dataframe or MTDataFrame object not {type(df)}\"\n )\n\n self._mt_dataframe.dataframe.reset_index(drop=True, inplace=True)\n\n @property\n def model_parameters(self):\n params = {\n \"wave_sign_impedance\": self.wave_sign_impedance,\n \"wave_sign_tipper\": self.wave_sign_tipper,\n \"z_units\": self.z_units,\n \"t_units\": self.t_units,\n \"inv_mode\": self.inv_mode,\n \"formatting\": self.formatting,\n \"data_filename\": self.data_filename,\n \"topography\": self.topography,\n \"rotation_angle\": self.rotation_angle,\n \"center_point.latitude\": self.center_point.latitude,\n \"center_point.longitue\": self.center_point.longitude,\n \"center_point.elevation\": self.center_point.elevation,\n \"center_point.utm_epsg\": self.center_point.utm_epsg,\n \"center_point.datum_epsg\": self.center_point.datum_epsg,\n }\n\n for key, value in self.z_model_error.error_parameters.items():\n params[f\"z_model_error.{key}\"] = value\n for key, value in self.t_model_error.error_parameters.items():\n params[f\"t_model_error.{key}\"] = value\n\n return params\n\n @property\n def data_filename(self):\n return self.save_path.joinpath(self.fn_basename)\n\n @data_filename.setter\n def data_filename(self, value):\n if value is not None:\n value = Path(value)\n if value.parent == Path(\".\"):\n self.fn_basename = value.name\n else:\n self.save_path = value.parent\n self.fn_basename = value.name\n\n @property\n def period(self):\n if self.dataframe is not None:\n return np.sort(self.dataframe.period.unique())\n\n def get_n_stations(self, mode):\n if self.dataframe is not None:\n if \"impedance\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.zxx != 0)\n | (self.dataframe.zxy != 0)\n | (self.dataframe.zyx != 0)\n | (self.dataframe.zyy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n elif \"vertical\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.tzx != 0) | (self.dataframe.tzy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n elif \"phase_tensor\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.ptxx != 0)\n | (self.dataframe.ptxy != 0)\n | (self.dataframe.ptyx != 0)\n | (self.dataframe.ptyy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n\n @property\n def n_periods(self):\n return self.period.size\n\n def _get_components(self):\n \"\"\"\n get 
components to write out\n \"\"\"\n\n comps = []\n for inv_modes in self.inv_mode_dict[self.inv_mode]:\n comps += self.inv_comp_dict[inv_modes]\n\n return comps\n\n def _get_header_string(self, error_type, error_value):\n \"\"\"\n Create the header strings\n\n # Created using MTpy calculated egbert_floor error of 5% data rotated 0.0_deg\n clockwise from N\n\n :param error_type: The method to calculate the errors\n :type error_type: string\n :param error_value: value of error or error floor\n :type error_value: float\n :param rotation_angle: angle data have been rotated by\n :type rotation_angle: float\n\n \"\"\"\n\n h_str = []\n if np.atleast_1d(error_type).ndim == 2:\n h_str = (\n f\"# Creating_software: MTpy v2, \"\n f\"error: [{error_type[0, 0]}, {error_type[0, 1]}, \"\n f\"{error_type[1, 0]}, {error_type[1, 1]}], \"\n )\n else:\n h_str = f\"# Creating_software: MTpy v2, error: {error_type}, \"\n\n if np.atleast_1d(error_value).ndim == 2:\n h_str += (\n f\"error floors of {error_value[0, 0]:.0f}%, \"\n f\"{error_value[0, 1]:.0f}%, \"\n f\"{error_value[1, 0]:.0f}%, \"\n f\"{error_value[1, 1]:.0f}%, \"\n f\"data rotated {self.rotation_angle:.1f}_deg clockwise from N, \"\n f\"{self.center_point.utm_crs}\"\n )\n\n else:\n if error_value > 1:\n fmt = \".0f\"\n units = \"%\"\n elif error_value < 1:\n fmt = \".2f\"\n units = \"\"\n h_str += (\n f\"error_value: {error_value:{fmt}}{units}, data_rotation: \"\n f\"{self.rotation_angle:.1f} deg clockwise, \"\n f\"model_{self.center_point.utm_crs}\"\n )\n\n return h_str\n\n def _write_header(self, mode):\n \"\"\" \"\"\"\n d_lines = []\n if \"impedance\" in mode.lower():\n d_lines.append(\n self._get_header_string(\n self.z_model_error.error_type,\n self.z_model_error.error_value,\n )\n )\n d_lines.append(self.header_string)\n d_lines.append(f\"> {mode}\")\n d_lines.append(f\"> exp({self.wave_sign_impedance}i\\omega t)\")\n d_lines.append(f\"> {self.z_units}\")\n\n elif \"vertical\" in mode.lower():\n d_lines.append(\n self._get_header_string(\n self.t_model_error.error_type,\n self.t_model_error.error_value,\n )\n )\n d_lines.append(self.header_string)\n d_lines.append(f\"> {mode}\")\n d_lines.append(f\"> exp({self.wave_sign_tipper}i\\omega t)\")\n d_lines.append(f\"> [{self.t_units}]\")\n\n d_lines.append(\n f\"> {self.rotation_angle:.3g}\"\n ) # orientation, need to add at some point\n if self.topography:\n d_lines.append(\n f\"> {self.center_point.latitude:>10.6f} \"\n f\"{self.center_point.longitude:>10.6f} \"\n f\"{self.center_point.model_elevation:>10.2f}\"\n )\n else:\n d_lines.append(\n f\"> {self.center_point.latitude:>10.6f} \"\n f\"{self.center_point.longitude:>10.6f}\"\n )\n\n n_stations = self.get_n_stations(mode)\n d_lines.append(f\"> {self.n_periods} {n_stations}\")\n\n return d_lines\n\n def _write_comp(self, row, comp):\n \"\"\"\n write a single row\n\n :param row: DESCRIPTION\n :type row: TYPE\n :param comp: DESCRIPTION\n :type comp: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n value = np.nan_to_num(getattr(row, comp))\n err = getattr(row, f\"{comp}_model_error\")\n\n if (\n value.real != 0.0\n and value.imag != 0.0\n and value.real != 1e32\n and value.imag != 1e32\n ):\n if self.formatting == \"1\":\n per = f\"{row.period:<12.5e}\"\n sta = f\"{row.station:>7}\"\n lat = f\"{row.latitude:> 9.3f}\"\n lon = f\"{row.longitude:> 9.3f}\"\n eas = f\"{row.model_east:> 12.3f}\"\n nor = f\"{row.model_north:> 12.3f}\"\n if self.topography:\n ele = f\"{row.model_elevation:> 12.3f}\"\n else:\n ele = f\"{0:> 12.3f}\"\n if 
comp[1].lower() == \"z\":\n comp = comp.replace(\"z\", \"\")\n com = f\"{comp:>4}\".upper()\n if self.z_units.lower() == \"ohm\":\n rea = f\"{value.real / 796.:> 14.6e}\"\n ima = f\"{value.imag / 796.:> 14.6e}\"\n elif self.z_units.lower() not in (\n \"[v/m]/[t]\",\n \"[mv/km]/[nt]\",\n ):\n raise ValueError(f\"Unsupported unit '{self.z_units}'\")\n else:\n rea = f\"{value.real:> 14.6e}\"\n ima = f\"{value.imag:> 14.6e}\"\n\n elif self.formatting == \"2\":\n per = f\"{row.period:<14.6e}\"\n sta = f\"{row.station:>10}\"\n lat = f\"{row.latitude:> 14.6f}\"\n lon = f\"{row.longitude:> 14.6f}\"\n eas = f\"{row.model_east:> 15.3f}\"\n nor = f\"{row.model_north:> 15.3f}\"\n if self.topography:\n ele = f\"{row.model_elevation:> 10.3f}\"\n else:\n ele = f\"{0:> 10.3f}\"\n if comp[1].lower() == \"z\":\n comp = comp.replace(\"z\", \"\")\n com = f\"{comp:>4}\".upper()\n if self.z_units.lower() == \"ohm\":\n rea = f\"{value.real / 796.:> 17.6e}\"\n ima = f\"{value.imag / 796.:> 17.6e}\"\n elif self.z_units.lower() not in (\n \"[v/m]/[t]\",\n \"[mv/km]/[nt]\",\n ):\n raise ValueError(f\"Unsupported unit '{self.z_units}'\")\n else:\n rea = f\"{value.real:> 17.6e}\"\n ima = f\"{value.imag:> 17.6e}\"\n\n else:\n raise NotImplementedError(\n f\"format {self.formatting} ({type(self.formatting)}) is \"\n \"not supported.\"\n )\n\n if np.isinf(err) or np.isnan(err):\n err = 10 ** (\n np.floor(np.log10(abs(max([float(rea), float(ima)]))))\n )\n abs_err = f\"{err:> 14.6e}\"\n\n return \"\".join(\n [\n per,\n sta,\n lat,\n lon,\n nor,\n eas,\n ele,\n com,\n rea,\n ima,\n abs_err,\n ]\n )\n\n def _check_for_errors_of_zero(self):\n \"\"\"\n Need to check for any zeros in the error values which can prevent\n ModEM from running.\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n ## check for zeros in model error\n for comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]:\n find_zeros = np.where(self.dataframe[f\"{comp}_model_error\"] == 0)[\n 0\n ]\n if find_zeros.shape[0] > 0:\n if comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\"]:\n error_percent = self.z_model_error.error_value\n elif \"t\" in comp:\n error_percent = self.t_model_error.error_value\n\n self.logger.warning(\n f\"Found errors with values of 0 in {comp} \"\n f\"{len(find_zeros)} times. Setting error as {comp} x \"\n f\"{error_percent}.\"\n )\n\n self.dataframe.loc[\n find_zeros.tolist(), f\"{comp}_model_error\"\n ] = (\n abs(self.dataframe[f\"{comp}\"].iloc[list(find_zeros)])\n * error_percent\n )\n\n def _check_for_too_small_errors(self, tol=0.02):\n \"\"\"\n Check for too small of errors relative to the error floor\n \"\"\"\n\n for comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]:\n find_small = np.where(\n self.dataframe[f\"{comp}_model_error\"]\n / abs(self.dataframe[comp])\n < tol\n )[0]\n if find_small.shape[0] > 0:\n\n if comp.startswith(\"z\"):\n error_percent = self.z_model_error.error_value\n elif comp.startswith(\"t\"):\n error_percent = self.t_model_error.error_value\n\n self.logger.warning(\n f\"Found errors with values less than {tol} in {comp} \"\n f\"{len(find_small)} times. 
Setting error as {comp} x \"\n f\"{error_percent}.\"\n )\n self.dataframe.loc[\n find_small.tolist(), f\"{comp}_model_error\"\n ] = (\n abs(self.dataframe[f\"{comp}\"].iloc[list(find_small)])\n * error_percent\n )\n\n def write_data_file(\n self,\n file_name=None,\n save_path=None,\n fn_basename=None,\n elevation=False,\n ):\n \"\"\"\n \n :param save_path: full directory to save file to, defaults to None\n :type save_path: string or Path, optional\n :param fn_basename: Basename of the saved file, defaults to None\n :type fn_basename: string, optional\n :param elevation: If True adds in elevation from 'rel_elev' column in data\n array, defaults to False\n :type elevation: boolean, optional\n\n :raises NotImplementedError: If the inversion mode is not supported\n :raises ValueError: :class:`mtpy.utils.exceptions.ValueError` if a parameter\n is missing\n :return: full path to data file\n :rtype: Path\n\n .. code-block::\n :linenos:\n\n >>> from pathlib import Path\n >>> import mtpy.modeling.modem as modem\n >>> edi_path = Path(r\"/home/mt/edi_files\")\n >>> edi_list = list(edi_path.glob(\"*.ed\"))\n >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\\\n >>> ... max_num_periods=12)\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n /home/modem/inv1/ModemDataFile.dat\n \n \"\"\"\n\n if self.dataframe is None:\n raise ValueError(\n \"A DataFrame needs to be present to write a ModEM data file\"\n )\n\n if file_name is not None:\n self.data_filename = file_name\n\n if save_path is not None:\n self.save_path = Path(save_path)\n if fn_basename is not None:\n self.data_filename = fn_basename\n\n self._check_for_errors_of_zero()\n self._check_for_too_small_errors()\n\n for inv_mode in self.inv_mode_dict[self.inv_mode]:\n if \"impedance\" in inv_mode.lower():\n z_lines = self._write_header(inv_mode)\n\n elif \"vertical\" in inv_mode.lower():\n t_lines = self._write_header(inv_mode)\n\n else:\n # maybe error here\n raise NotImplementedError(\n f\"inv_mode {inv_mode} is not supported yet\"\n )\n\n comps = self._get_components()\n # Iterate over stations and sort by period\n for station in self.dataframe.station.unique():\n sdf = self.dataframe.loc[self.dataframe.station == station]\n sdf.sort_values(\"period\")\n\n for row in sdf.itertuples():\n for comp in comps:\n d_line = self._write_comp(row, comp)\n if d_line is None:\n continue\n\n if comp.startswith(\"z\"):\n z_lines.append(d_line)\n elif comp.startswith(\"t\"):\n t_lines.append(d_line)\n\n with open(self.data_filename, \"w\") as dfid:\n dfid.write(\"\\n\".join(z_lines + t_lines))\n\n self.logger.info(\n \"Wrote ModEM data file to {0}\".format(self.data_filename)\n )\n return self.data_filename\n\n def _read_header(self, header_lines):\n \"\"\"\n Read header lines\n\n :param header_lines: DESCRIPTION\n :type header_lines: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n mode = None\n inv_list = []\n header_list = []\n metadata_list = []\n n_periods = 0\n n_stations = 0\n self.center_point = MTLocation()\n for hline in header_lines:\n if hline.find(\"#\") == 0:\n if \"period\" not in hline.lower():\n header_list.append(hline.strip())\n elif hline.find(\">\") == 0:\n # modem outputs only 7 characters for the lat and lon\n # if there is a negative they merge together, need to split\n # them up\n hline = hline.replace(\"-\", \" -\")\n metadata_list.append(hline[1:].strip())\n if hline.lower().find(\"ohm\") > 0:\n self.z_units = \"ohm\"\n continue\n elif hline.lower().find(\"mv\") > 0:\n self.z_units = \"[mV/km]/[nT]\"\n 
continue\n elif hline.lower().find(\"vertical\") > 0:\n mode = \"vertical\"\n inv_list.append(\"Full_Vertical_Components\")\n continue\n elif hline.lower().find(\"impedance\") > 0:\n mode = \"impedance\"\n inv_list.append(\"Full_Impedance\")\n continue\n\n if hline.find(\"exp\") > 0:\n if mode in [\"impedance\"]:\n self.wave_sign_impedance = hline[hline.find(\"(\") + 1]\n elif mode in [\"vertical\"]:\n self.wave_sign_tipper = hline[hline.find(\"(\") + 1]\n\n elif (\n len(hline[1:].strip().split()) >= 2\n and hline.count(\".\") > 0\n ):\n value_list = [\n float(value) for value in hline[1:].strip().split()\n ]\n if value_list[0] != 0.0:\n self.center_point.latitude = value_list[0]\n if value_list[1] != 0.0:\n self.center_point.longitude = value_list[1]\n try:\n self.center_point.elevation = value_list[2]\n except IndexError:\n self.center_point.elevation = 0.0\n self.logger.debug(\n \"Did not find center elevation in data file\"\n )\n elif len(hline[1:].strip().split()) < 2:\n try:\n self.rotation_angle = float(hline[1:].strip())\n except ValueError:\n continue\n elif len(hline[1:].strip().split()) == 2:\n n_periods = int(hline[1:].strip().split()[0])\n n_stations = int(hline[1:].strip().split()[1])\n\n for head_line, inv_mode in zip(header_list, inv_list):\n self._parse_header_line(head_line, inv_mode)\n\n self._get_inv_mode(inv_list)\n\n return n_periods, n_stations\n\n def _parse_header_line(self, header_line, mode):\n \"\"\"\n Parse header line\n\n \"\"\"\n\n if header_line == self.header_string:\n return\n\n item_dict = {\n \"error\": \"error_type\",\n \"error_value\": \"error_value\",\n \"data_rotation\": \"rotation_angle\",\n \"model_epsg\": \"center_point.utm_epsg\",\n }\n\n if header_line.count(\",\") > 0:\n header_list = header_line.split(\",\")\n else:\n header_list = header_line.split()\n\n if \"impedance\" in mode.lower():\n obj = self.z_model_error\n\n elif \"vertical\" in mode.lower():\n obj = self.t_model_error\n\n for ii, item in enumerate(header_list):\n item = item.lower()\n if item.count(\":\") > 0:\n item_list = [k.strip() for k in item.split(\":\")]\n if len(item_list) == 2:\n key = item_list[0]\n value = item_list[1].replace(\"%\", \"\").split()[0]\n if key in [\"error_value\", \"data_rotation\"]:\n try:\n value = float(value)\n except ValueError:\n pass\n try:\n if key in [\"model_epsg\"]:\n setattr(self.center_point, \"utm_epsg\", value)\n elif \"error\" in key:\n\n setattr(\n obj,\n item_dict[key],\n value,\n )\n else:\n setattr(self, item_dict[\"key\"], value)\n except KeyError:\n continue\n\n ## Older files\n else:\n if item in [\"calculated\"]:\n value = header_list[ii + 1]\n\n if \"floor\" in value:\n setattr(obj, \"floor\", True)\n value = value.replace(\"_floor\", \"\")\n setattr(obj, \"error_type\", value)\n\n if item in [\"of\"]:\n value = float(header_list[ii + 1].replace(\"%\", \"\"))\n setattr(obj, item_dict[\"error_value\"], value)\n\n if \"deg\" in item:\n setattr(\n self,\n item_dict[\"data_rotation\"],\n float(item.split(\"_\")[0]),\n )\n\n def _get_rotation_angle(self, header_line):\n # try to find rotation angle\n h_list = header_line.split()\n for hh, h_str in enumerate(h_list):\n if h_str.find(\"_deg\") > 0:\n try:\n self.rotation_angle = float(h_str[0 : h_str.find(\"_deg\")])\n except ValueError:\n pass\n\n def _get_inv_mode(self, inv_list):\n # find inversion mode\n for inv_key in list(self.inv_mode_dict.keys()):\n inv_mode_list = self.inv_mode_dict[inv_key]\n if len(inv_mode_list) != inv_list:\n continue\n else:\n tf_arr = 
np.zeros(len(inv_list), dtype=bool)\n\n for tf, data_inv in enumerate(inv_list):\n if data_inv in self.inv_mode_dict[inv_key]:\n tf_arr[tf] = True\n\n if np.alltrue(tf_arr):\n self.inv_mode = inv_key\n break\n\n def _read_line(self, line):\n \"\"\"\n read a single line\n :param line: DESCRIPTION\n :type line: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n .. note:: Pandas Groupby does not play nice with complex numbers so\n we will be keeping the real and imaginary part separate for now.\n\n \"\"\"\n\n line_dict = dict(\n [(key, value) for key, value in zip(self._df_keys, line.split())]\n )\n for key in [\n \"period\",\n \"latitude\",\n \"longitude\",\n \"model_east\",\n \"model_north\",\n \"model_elevation\",\n \"real\",\n \"imag\",\n \"error\",\n ]:\n line_dict[key] = float(line_dict[key])\n\n comp = line_dict.pop(\"comp\").lower()\n if comp.startswith(\"t\"):\n comp = comp.replace(\"t\", \"tz\")\n line_dict[f\"{comp}_real\"] = line_dict.pop(\"real\")\n line_dict[f\"{comp}_imag\"] = line_dict.pop(\"imag\")\n line_dict[f\"{comp}_model_error\"] = line_dict.pop(\"error\")\n if line_dict[f\"{comp}_model_error\"] > 1e10:\n line_dict[f\"{comp}_model_error\"] = np.nan\n\n return line_dict\n\n def read_data_file(self, data_fn):\n \"\"\"\n\n :param data_fn: full path to data file name\n :type data_fn: string or Path\n :raises ValueError: If cannot compute component\n\n Fills attributes:\n * data_array\n * period_list\n * mt_dict\n\n .. code-block::\n\n >>> md = Data()\n >>> md.read_data_file(r\"/home/modem_data.dat\")\n >>> md\n ModEM Data Object:\n Number of stations: 169\n Number of periods: 22\n Period range:\n Min: 0.01 s\n Max: 15230.2 s\n Rotation angle: 0.0\n Data center:\n latitude: 39.6351 deg\n longitude: -119.8039 deg\n Elevation: 0.0 m\n Easting: 259368.9746 m\n Northing: 4391021.1981 m\n UTM zone: 11S\n Model EPSG: None\n Model UTM zone: None\n Impedance data: True\n Tipper data: True\n\n\n \"\"\"\n\n self.data_filename = Path(data_fn)\n\n if self.data_filename is None:\n raise ValueError(\"data_fn is None, enter a data file to read.\")\n elif not self.data_filename.is_file():\n raise ValueError(\n \"Could not find {0}, check path\".format(self.data_filename)\n )\n\n self.center_point = MTLocation()\n\n # open file get lines\n with open(self.data_filename, \"r\") as dfid:\n dlines = dfid.readlines()\n\n # read header information\n n_periods, n_stations = self._read_header(\n [line for line in dlines if \">\" in line or \"#\" in line]\n )\n\n # create a list of dictionaries to make into a pandas dataframe\n entries = []\n for dline in dlines:\n if \"#\" in dline or \">\" in dline:\n continue\n\n elif len(dline.split()) == len(self._df_keys):\n line_dict = self._read_line(dline)\n entries.append(line_dict)\n\n full_df = pd.DataFrame(entries)\n\n # group by period and station so that there is 1 row per period per station\n combined_df = full_df.groupby(\n [\"station\", \"period\"], as_index=False\n ).first()\n\n # combine real and imaginary\n cols = [c.split(\"_\")[0] for c in combined_df.columns if \"real\" in c]\n for col in cols:\n combined_df[col] = (\n combined_df[f\"{col}_real\"] + 1j * combined_df[f\"{col}_imag\"]\n )\n combined_df.drop(\n [f\"{col}_real\", f\"{col}_imag\"], axis=1, inplace=True\n )\n\n return MTDataFrame(combined_df)\n\n def fix_data_file(self, fn=None, n=3):\n \"\"\"\n A newer compiled version of Modem outputs slightly different headers\n This aims to convert that into the older format\n\n :param fn: DESCRIPTION, defaults to None\n :type fn: TYPE, optional\n 
:param n: DESCRIPTION, defaults to 3\n :type n: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n if fn:\n self.data_filename = Path(fn)\n with self.data_filename.open() as fid:\n lines = fid.readlines()\n\n def fix_line(line_list):\n return (\n \" \".join(\"\".join(line_list).replace(\"\\n\", \"\").split()) + \"\\n\"\n )\n\n h1 = fix_line(lines[0:n])\n h2 = fix_line(lines[n : 2 * n])\n\n find = None\n for index, line in enumerate(lines[2 * n + 1 :], start=2 * n + 1):\n if line.find(\"#\") >= 0:\n find = index\n break\n\n if find is not None:\n h3 = fix_line(lines[find : find + n])\n h4 = fix_line(lines[find + n : find + 2 * n])\n\n new_lines = (\n [h1, h2]\n + lines[2 * n : find]\n + [h3, h4]\n + lines[find + 2 * n :]\n )\n else:\n new_lines = [h1, h2] + lines[2 * n :]\n\n with self.data_filename.open(\"w\") as fid:\n fid.writelines(new_lines)\n\n return self.data_filename" }, { "identifier": "PlotRMS", "path": "mtpy/modeling/plots/plot_modem_rms.py", "snippet": "class PlotRMS(PlotBaseMaps):\n def __init__(self, dataframe, **kwargs):\n super().__init__(**kwargs)\n\n self.dataframe = dataframe\n self.dx = 0.035\n self.rms_min = 0\n self.rms_max = 5\n self.rms_step = 0.5\n self.plot_station = True\n self.station_id = None\n self.stack_bottom = False\n\n self.comp_list = [\n \"rms_zxx\",\n \"rms_zxy\",\n \"rms_zyx\",\n \"rms_zyy\",\n \"rms_tzx\",\n \"rms_tzy\",\n ]\n self.distance_multiplier = [\n (-0.5, 1),\n (0.5, 1),\n (-0.5, 0),\n (0.5, 0),\n (-0.5, -1),\n (0.5, -1),\n ]\n\n self.color_dict = {\n \"rms_z\": (0, 162 / 255, 255 / 255),\n \"rms_t\": (255 / 255, 162 / 255, 0),\n \"rms_zxx\": (136 / 255, 235 / 255, 193 / 255),\n \"rms_zxy\": (84 / 255, 189 / 255, 215 / 255),\n \"rms_zyx\": (136 / 255, 84 / 255, 215 / 255),\n \"rms_zyy\": (206 / 255, 84 / 255, 215 / 255),\n \"rms_tzx\": (215 / 255, 210 / 255, 84 / 255),\n \"rms_tzy\": (215 / 255, 154 / 255, 84 / 255),\n }\n\n self.label_dict = {\n \"rms_z\": \"Z\",\n \"rms_t\": \"Tipper\",\n \"rms_zxx\": \"$Z_{xx}$\",\n \"rms_zxy\": \"$Z_{xy}$\",\n \"rms_zyx\": \"$Z_{yx}$\",\n \"rms_zyy\": \"$Z_{yy}$\",\n \"rms_tzx\": \"$T_{zx}$\",\n \"rms_tzy\": \"$T_{zy}$\",\n }\n\n self.rms_cmap = \"jet\"\n\n self.subplot_left = 0.05\n self.subplot_right = 0.99\n self.subplot_bottom = 0.09\n self.subplot_top = 0.99\n\n self.box_size = 30\n\n self.cx_source = None\n self.cx_zoom = None\n if has_cx:\n self.cx_source = cx.providers.USGS.USTopo\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n @property\n def dataframe(self):\n return self._mt_dataframe.dataframe\n\n @dataframe.setter\n def dataframe(self, df):\n \"\"\"\n Set dataframe to an MTDataframe\n :param df: DESCRIPTION\n :type df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if df is None:\n self._mt_dataframe = MTDataFrame()\n\n elif isinstance(df, (pd.DataFrame, MTDataFrame, np.ndarray)):\n self._mt_dataframe = MTDataFrame(df)\n\n else:\n raise TypeError(\n f\"Input must be a dataframe or MTDataFrame object not {type(df)}\"\n )\n\n @property\n def rms_cmap(self):\n return self._rms_cmap\n\n @rms_cmap.setter\n def rms_cmap(self, value):\n if isinstance(value, str):\n self._rms_cmap = cm.get_cmap(value)\n\n elif isinstance(value, colors.LinearSegmentedColormap):\n self._rms_cmap = value\n\n else:\n self._rms_cmap = cm.get_cmap(\"jet\")\n\n def _plot_rms_map(self):\n \"\"\"\n plot rms map\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n cb_norm = colors.BoundaryNorm(\n np.arange(\n self.rms_min, self.rms_max + self.rms_step, self.rms_step\n ),\n 
self.rms_cmap.N,\n )\n\n for dm, comp in zip(self.distance_multiplier, self.comp_list):\n for station in self.dataframe.station.unique():\n\n sdf = self._mt_dataframe.get_station_df(station)\n rms = sdf[comp].mean()\n self.ax1.scatter(\n sdf.longitude.iloc[0] + (self.dx / 2) * dm[0],\n sdf.latitude.iloc[0] + (self.dx / 2) * dm[1],\n c=rms,\n marker=\"s\",\n s=self.box_size,\n edgecolors=(0, 0, 0),\n cmap=self.rms_cmap,\n norm=cb_norm,\n )\n if self.plot_station:\n self.ax1.text(\n sdf.longitude.iloc[0],\n sdf.latitude.iloc[0] + self.dx,\n station,\n ha=\"center\",\n va=\"baseline\",\n clip_on=True,\n )\n\n if has_cx:\n if has_cx:\n try:\n cx_kwargs = {\"source\": self.cx_source, \"crs\": \"EPSG:4326\"}\n if self.cx_zoom is not None:\n cx_kwargs[\"zoom\"] = self.cx_zoom\n cx.add_basemap(\n self.ax1,\n **cx_kwargs,\n )\n except Exception as error:\n self.logger.warning(\n f\"Could not add base map because {error}\"\n )\n\n cb_ax, _ = mcb.make_axes(self.ax1, shrink=0.5)\n cb = mcb.ColorbarBase(cb_ax, cmap=self.rms_cmap, norm=cb_norm)\n\n @property\n def rms_per_period_all(self):\n \"\"\"\n RMS per period\n \"\"\"\n\n if self.dataframe is not None:\n rms_list = []\n for period in self.dataframe.period.unique():\n comp_df = self.dataframe.loc[\n self.dataframe.period == period,\n [\n \"rms_zxx\",\n \"rms_zxy\",\n \"rms_zyx\",\n \"rms_zyy\",\n \"rms_tzx\",\n \"rms_tzy\",\n ],\n ]\n\n mean_dict = {\"period\": period}\n for comp in comp_df.columns:\n mean_dict[comp] = comp_df.loc[:, comp].mean()\n\n rms_list.append(mean_dict)\n\n df = pd.DataFrame(rms_list)\n df = df.set_index(\"period\")\n df = df.sort_index()\n\n return df\n\n @property\n def rms_per_station(self):\n \"\"\"\n RMS per period\n \"\"\"\n\n if self.dataframe is not None:\n rms_list = []\n for station in self.dataframe.station.unique():\n z_df = self.dataframe.loc[\n self.dataframe.station == station,\n [\"rms_zxx\", \"rms_zxy\", \"rms_zyx\", \"rms_zyy\"],\n ]\n t_df = self.dataframe.loc[\n self.dataframe.station == station, [\"rms_tzx\", \"rms_tzy\"]\n ]\n\n rms_list.append(\n {\n \"station\": station,\n \"rms_z\": z_df.mean().mean(),\n \"rms_t\": t_df.mean().mean(),\n }\n )\n\n df = pd.DataFrame(rms_list)\n df = df.set_index(\"station\")\n df = df.sort_index()\n\n return df\n\n @property\n def rms_array(self):\n \"\"\"\n arrays for color maps\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n period_dict = dict(\n [\n (f\"{ff:.4g}\", ii)\n for ii, ff in enumerate(self.dataframe.period.unique())\n ]\n )\n\n station_dict = dict(\n [(ss, ii) for ii, ss in enumerate(self.dataframe.station.unique())]\n )\n\n rms_array = np.zeros(\n (\n self.dataframe.station.unique().size,\n self.dataframe.period.unique().size,\n 6,\n )\n )\n\n for row in self.dataframe.itertuples():\n p_index = period_dict[f\"{row.period:.4g}\"]\n s_index = station_dict[row.station]\n\n for ii, comp in enumerate(\n [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]\n ):\n rms_array[s_index, p_index, ii] = getattr(row, f\"rms_{comp}\")\n\n return rms_array\n\n def _plot_colormesh(self):\n \"\"\"\n plot as color maps\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n x = self.dataframe.period.unique()\n y = np.arange(self.dataframe.station.unique().size)\n xg, yg = np.meshgrid(x, y)\n\n rms_array = self.rms_array.copy()\n\n fig = plt.figure()\n fig.subplotpars.hspace = 0.15\n fig.subplotpars.vspace = 0.15\n\n ax_list = []\n for ii in range(6):\n if ii == 0:\n ax = fig.add_subplot(3, 2, ii + 1)\n else:\n ax = fig.add_subplot(3, 2, ii + 1, 
sharex=ax_list[0])\n\n ax.pcolormesh(\n xg, yg, rms_array[:, :, ii], cmap=self.rms_cmap, vmin=0, vmax=5\n )\n ax.text(\n x[0],\n y[-3],\n self.label_dict[self.comp_list[ii]],\n ha=\"left\",\n va=\"bottom\",\n bbox={\"facecolor\": \"w\"},\n )\n ax.set_xscale(\"log\")\n\n ax_list.append(ax)\n\n for ax in ax_list[-2:]:\n ax.set_xlabel(\"Period (s)\")\n\n plt.show()\n return fig, ax_list\n\n def print_suspect_stations(self, rms_threshold=4):\n \"\"\"\n print stations that are suspect\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n red_begin = \"\\033[1;31;48m\"\n red_end = \"\\033[1;37;0m\"\n\n df = self.rms_per_station\n max_len = max([len(ii) for ii in df.index])\n\n for row in df.itertuples():\n if row.rms_z > rms_threshold or row.rms_t > rms_threshold:\n if row.rms_z > rms_threshold:\n z_value = f\"{red_begin}Z = {row.rms_z:<6.2f}{red_end}\"\n else:\n z_value = f\"Z = {row.rms_z:<6.2f}\"\n\n if row.rms_t > rms_threshold:\n t_value = f\"{red_begin}T = {row.rms_t:<6.2f}{red_end}\"\n else:\n t_value = f\"T = {row.rms_t:<6.2f}\"\n print(f\"{row.Index:<{max_len}} {z_value} {t_value}\")\n\n def _plot_by_period(self):\n \"\"\"\n plot by period\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n df = self.rms_per_period_all.copy()\n plot_list = []\n color_list = []\n for comp in df.columns:\n if not np.all(np.isnan(df[comp])):\n plot_list.append(comp)\n color_list.append(self.color_dict[comp])\n\n ax = df.plot.bar(\n y=plot_list,\n color=color_list,\n xlabel=\"Period (s)\",\n ylabel=\"normalized RMS\",\n grid=True,\n ax=self.ax2,\n )\n ax.set_axisbelow(True)\n\n ax.set_xticklabels(\n [f\"{float(x.get_text()):.4g}\" for x in ax.get_xticklabels()]\n )\n ax.tick_params(left=True)\n # ticks_loc = ax.get_yticks().tolist()\n # ax.yaxis.set_major_locator(ticker.FixedLocator(ticks_loc))\n # ax.set_yticklabels([f\"{x:.1f}\" for x in ticks_loc])\n\n return ax\n\n def _plot_by_station(self):\n \"\"\"\n plot by station\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n df = self.rms_per_station.copy()\n plot_list = []\n color_list = []\n for comp in df.columns:\n if not np.all(np.isnan(df[comp])):\n plot_list.append(comp)\n color_list.append(self.color_dict[comp])\n\n ax = df.plot.bar(\n y=plot_list,\n color=color_list,\n xlabel=\"Station\",\n ylabel=\"normalized RMS\",\n grid=True,\n ax=self.ax3,\n )\n\n ax.tick_params(left=True)\n # ticks_loc = ax.get_yticks().tolist()\n # ax.yaxis.set_major_locator(ticker.FixedLocator(ticks_loc))\n # ax.set_yticklabels([f\"{x:.1f}\" for x in ticks_loc])\n\n ax.set_axisbelow(True)\n\n return ax\n\n def _get_subplots(self, fig):\n\n if self.stack_bottom:\n gs1 = gridspec.GridSpec(2, 2, hspace=0.25, wspace=0.075)\n\n self.ax1 = fig.add_subplot(gs1[0, :], aspect=\"equal\")\n self.ax2 = fig.add_subplot(gs1[1, 0])\n self.ax3 = fig.add_subplot(gs1[1, 1])\n else:\n gs1 = gridspec.GridSpec(2, 2, hspace=0.35, wspace=0.075)\n\n self.ax1 = fig.add_subplot(gs1[:, 0], aspect=\"equal\")\n self.ax2 = fig.add_subplot(gs1[0, 1])\n self.ax3 = fig.add_subplot(gs1[1, 1])\n\n def plot(self, **kwargs):\n \"\"\"\n\n :param **kwargs: DESCRIPTION\n :type **kwargs: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n self._set_subplot_params()\n\n self.fig = plt.figure(\n self.fig_num, figsize=self.fig_size, dpi=self.fig_dpi\n )\n\n plt.clf()\n\n self._get_subplots(self.fig)\n\n self._plot_rms_map()\n self._plot_by_period()\n self._plot_by_station()" } ]
from pathlib import Path
from .data import Data
from mtpy.modeling.plots import PlotRMS
import numpy as np
import pandas as pd
15,569
""" if self.dataframe is None: return for col in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]: with np.errstate(divide="ignore", invalid="ignore"): self.dataframe[f"rms_{col}"] = np.abs(self.dataframe[col]) / ( np.real(self.dataframe[f"{col}_model_error"]) * np.sqrt(2) ) @property def rms_per_period_all(self): """ RMS per period """ if self.dataframe is not None: rms_list = [] for period in self.dataframe.period.unique(): z_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy"], ] t_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_tzx", "rms_tzy"] ] rms_list.append( { "period": period, "rms_z": z_df.mean().mean(), "rms_t": t_df.mean().mean(), } ) df = pd.DataFrame(rms_list) df = df.set_index("period") return df @property def rms_per_period_per_component(self): """ RMS per period by component :return: DESCRIPTION :rtype: TYPE """ rms_list = [] for period in self.dataframe.period.unique(): comp_df = self.dataframe.loc[ self.dataframe.period == period, [ "rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy", "rms_tzx", "rms_tzy", ], ] mean_dict = {"period": period} for comp in comp_df.columns: mean_dict[comp] = comp_df.loc[:, comp].mean() rms_list.append(mean_dict) df = pd.DataFrame(rms_list) df = df.set_index("period") return df def plot_rms_per_period(self, plot_type="all", **kwargs): """ :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """ if plot_type == "all": df = self.rms_per_period_all.copy() elif plot_type == "comp": df = self.rms_per_period_per_component.copy() plot_list = [] color_list = [] for comp in df.columns: if not np.all(np.isnan(df[comp])): plot_list.append(comp) color_list.append(self.color_dict[comp]) ax = df.plot.bar( y=plot_list, color=color_list, xlabel="Period (s)", ylabel="normalized RMS", grid=True, **kwargs, ) ax.set_axisbelow(True) return ax def plot_rms(self, **kwargs): """ plot RMS in different views :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Residual(Data): """ class to contain residuals for each data point, and rms values for each station ====================== ==================================================== Attributes/Key Words Description ====================== ==================================================== work_dir residual_fn full path to data file residual_array numpy.ndarray (num_stations) structured to store data. keys are: * station --> station name * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * east --> UTM east (m) * north --> UTM north (m) * zone --> UTM zone * z --> impedance tensor residual (measured - modelled) (num_freq, 2, 2) * z_err --> impedance tensor error array with shape (num_freq, 2, 2) * tip --> Tipper residual (measured - modelled) (num_freq, 1, 2) * tipperr --> Tipper array with shape (num_freq, 1, 2) rms rms_array numpy.ndarray structured to store station location values and rms. Keys are: * station --> station name * east --> UTM east (m) * north --> UTM north (m) * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * zone --> UTM zone * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * rms --> root-mean-square residual for each station rms_tip rms_z ====================== ==================================================== """ # todo complete the doc above def __init__(self, **kwargs): self.work_dir = Path() self.residual_fn = None self.residual_array = None self.rms = None self.rms_array = None self.rms_tip = None self.rms_z = None super().__init__(**kwargs) self.color_dict = { "rms_z": (0, 162 / 255, 255 / 255), "rms_t": (255 / 255, 162 / 255, 0), "rms_zxx": (136 / 255, 235 / 255, 193 / 255), "rms_zxy": (84 / 255, 189 / 255, 215 / 255), "rms_zyx": (136 / 255, 84 / 255, 215 / 255), "rms_zyy": (206 / 255, 84 / 255, 215 / 255), "rms_tzx": (215 / 255, 210 / 255, 84 / 255), "rms_tzy": (215 / 255, 154 / 255, 84 / 255), } for key, value in kwargs.items(): setattr(self, key, value) def read_residual_file(self, residual_fn): """ :param residual_fn: DESCRIPTION, defaults to None :type residual_fn: TYPE, optional :return: DESCRIPTION :rtype: TYPE """ self.dataframe = self.read_data_file(residual_fn) self.calculate_rms() def calculate_rms(self): """ add columns for rms :return: DESCRIPTION :rtype: TYPE """ if self.dataframe is None: return for col in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]: with np.errstate(divide="ignore", invalid="ignore"): self.dataframe[f"rms_{col}"] = np.abs(self.dataframe[col]) / ( np.real(self.dataframe[f"{col}_model_error"]) * np.sqrt(2) ) @property def rms_per_period_all(self): """ RMS per period """ if self.dataframe is not None: rms_list = [] for period in self.dataframe.period.unique(): z_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy"], ] t_df = self.dataframe.loc[ 
self.dataframe.period == period, ["rms_tzx", "rms_tzy"] ] rms_list.append( { "period": period, "rms_z": z_df.mean().mean(), "rms_t": t_df.mean().mean(), } ) df = pd.DataFrame(rms_list) df = df.set_index("period") return df @property def rms_per_period_per_component(self): """ RMS per period by component :return: DESCRIPTION :rtype: TYPE """ rms_list = [] for period in self.dataframe.period.unique(): comp_df = self.dataframe.loc[ self.dataframe.period == period, [ "rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy", "rms_tzx", "rms_tzy", ], ] mean_dict = {"period": period} for comp in comp_df.columns: mean_dict[comp] = comp_df.loc[:, comp].mean() rms_list.append(mean_dict) df = pd.DataFrame(rms_list) df = df.set_index("period") return df def plot_rms_per_period(self, plot_type="all", **kwargs): """ :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """ if plot_type == "all": df = self.rms_per_period_all.copy() elif plot_type == "comp": df = self.rms_per_period_per_component.copy() plot_list = [] color_list = [] for comp in df.columns: if not np.all(np.isnan(df[comp])): plot_list.append(comp) color_list.append(self.color_dict[comp]) ax = df.plot.bar( y=plot_list, color=color_list, xlabel="Period (s)", ylabel="normalized RMS", grid=True, **kwargs, ) ax.set_axisbelow(True) return ax def plot_rms(self, **kwargs): """ plot RMS in different views :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """
plot_rms = PlotRMS(self.dataframe, **kwargs)
1
2023-10-11 22:24:50+00:00
24k
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResult:\n if parsing_type is None:\n return ParseResult(parsed_outputs={}, error=False, error_log=None)\n if raw_output is None:\n return ParseResult(parsed_outputs={}, error=True, error_log=\"No content\")\n parsing_pattern = get_pattern_by_type(parsing_type)\n whole_pattern = parsing_pattern[\"whole\"]\n parsed_results = re.findall(whole_pattern, raw_output, flags=re.DOTALL)\n parsed_outputs = {}\n error: bool = False\n error_log: str = None\n\n try:\n for parsed_result in parsed_results:\n key = parsed_result[0]\n type_str = parsed_result[1]\n value = convert_str_to_type(parsed_result[2], type_str)\n parsed_outputs[key] = value\n except Exception as e:\n error = True\n error_log = str(e)\n\n return ParseResult(\n parsed_outputs=parsed_outputs,\n error=error,\n error_log=error_log,\n )\n\n def __validate_openai_messages(\n self, messages: List[Dict[str, str]]\n ) -> List[OpenAIMessage]:\n \"\"\"Validate and convert list of dictionaries to list of OpenAIMessage.\"\"\"\n res = []\n for message in messages:\n res.append(OpenAIMessage(**message))\n return res\n\n def run(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n response = None\n if functions == []:\n functions = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[ChatCompletionMessageToolCall] = getattr(\n 
response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream(\n self,\n messages: List[Dict[str, str]], # input\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Stream openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n # load_prompt()\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def run_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Parse and return output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with 
output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Generate openai chat completion asynchronously, and parse the output.\n Example prompt is as follows:\n -----\n Given a topic, you are required to generate a story.\n You must follow the provided output format.\n\n Topic:\n {topic}\n\n Output format:\n [Story]\n ...\n [/Story]\n\n Now generate the output:\n \"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n 
api_key: Optional[str] = None,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False\n error_log = None\n\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ): # only get the last api_response, not delta response\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n for chunk in self.__double_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n else:\n for chunk in self.__single_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if (\n output_keys is 
not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False # error in stream time\n error_log = None\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ):\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n async for chunk in self.__double_type_sp_agenerator__(\n messages, response, parsing_type, start_time, 
functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n else:\n async for chunk in self.__single_type_sp_agenerator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n\n if (\n output_keys is not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def make_model_response(\n self,\n chunk: ModelResponse,\n response_ms,\n messages: List[Dict[str, str]],\n raw_output: str,\n functions: Optional[List[Any]] = None,\n function_call: Optional[Dict[str, Any]] = None,\n tools: Optional[List[Any]] = None,\n tool_calls: Optional[List[Dict[str, Any]]] = None,\n ) -> ModelResponse:\n count_start_time = datetime.datetime.now()\n prompt_token: int = num_tokens_for_messages(\n messages=messages, model=chunk[\"model\"]\n )\n completion_token: int = num_tokens_for_messages(\n model=chunk[\"model\"],\n messages=[{\"role\": \"assistant\", \"content\": raw_output}],\n )\n\n if functions and len(functions) > 0:\n functions_token = num_tokens_from_functions_input(\n functions=functions, model=chunk[\"model\"]\n )\n prompt_token += functions_token\n\n if tools and len(tools) > 0:\n tools_token = num_tokens_from_functions_input(\n functions=[tool[\"function\"] for tool in tools], model=chunk[\"model\"]\n )\n prompt_token += tools_token\n # if function_call:\n # function_call_token = num_tokens_from_function_call_output(\n # function_call_output=function_call, model=chunk[\"model\"]\n # )\n # completion_token += function_call_token\n\n count_end_time = datetime.datetime.now()\n logger.debug(\n f\"counting token time : {(count_end_time - count_start_time).total_seconds() * 1000} ms\"\n )\n\n usage = Usage(\n **{\n \"prompt_tokens\": prompt_token,\n \"completion_tokens\": completion_token,\n \"total_tokens\": prompt_token + completion_token,\n }\n )\n\n last_message = Message(\n role=chunk.choices[0].delta.role\n if getattr(chunk.choices[0].delta, \"role\", None)\n else \"assistant\",\n content=raw_output if raw_output != \"\" else None,\n function_call=function_call if function_call else None,\n tool_calls=tool_calls if tool_calls else None,\n )\n choices = [\n Choices(finish_reason=chunk.choices[0].finish_reason, message=last_message)\n ]\n\n res = ModelResponse(\n id=chunk[\"id\"],\n created=chunk[\"created\"],\n model=chunk[\"model\"],\n stream=True,\n )\n res.choices = choices\n res.usage = usage\n res._response_ms = response_ms\n\n return res\n\n def __llm_stream_response_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n try:\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n 
chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n raw_output += chunk.choices[0].delta.content\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=chunk.choices[0].delta.content,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __single_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield 
LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __double_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", 
\"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if (\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason 
== \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __llm_stream_response_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n try:\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __single_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value 
is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def 
__double_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n # if len(keys) > 1:\n # yield LLMStreamResponse(\n # error=True,\n # error_log=\"Parsing error : Nested key detected\",\n # )\n # break\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n # break\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if 
(\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))" }, { "identifier": "DeployedPrompt", "path": "promptmodel/database/models.py", "snippet": "class DeployedPrompt(BaseModel):\n id = AutoField()\n version_uuid = ForeignKeyField(\n DeployedFunctionModelVersion,\n field=DeployedFunctionModelVersion.uuid,\n backref=\"prompts\",\n on_delete=\"CASCADE\",\n )\n role = CharField()\n step = IntegerField()\n content = TextField()" }, { "identifier": "DeployedFunctionModel", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModel(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n name = CharField()" }, { "identifier": "DeployedFunctionModelVersion", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModelVersion(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n version = IntegerField(null=False)\n from_version = IntegerField(null=True)\n function_model_uuid = ForeignKeyField(\n DeployedFunctionModel,\n field=DeployedFunctionModel.uuid,\n backref=\"versions\",\n on_delete=\"CASCADE\",\n )\n model = CharField()\n is_published = BooleanField(default=False)\n is_ab_test = BooleanField(default=False)\n ratio = FloatField(null=True)\n parsing_type = CharField(\n null=True,\n default=None,\n constraints=[\n Check(\n f\"parsing_type IN ('{ParsingType.COLON.value}', '{ParsingType.SQUARE_BRACKET.value}', '{ParsingType.DOUBLE_SQUARE_BRACKET.value}')\"\n )\n ],\n )\n output_keys = JSONField(null=True, default=None)\n functions = JSONField(default=[])" }, { "identifier": "get_deployed_prompts", "path": "promptmodel/database/crud.py", "snippet": "def get_deployed_prompts(function_model_name: str) -> Tuple[List[DeployedPrompt], str]:\n try:\n with db.atomic():\n versions: List[DeployedFunctionModelVersion] = list(\n DeployedFunctionModelVersion.select()\n .join(DeployedFunctionModel)\n .where(\n DeployedFunctionModelVersion.function_model_uuid\n == DeployedFunctionModel.get(\n DeployedFunctionModel.name == function_model_name\n ).uuid\n )\n )\n prompts: List[DeployedPrompt] = list(\n DeployedPrompt.select()\n .where(\n DeployedPrompt.version_uuid.in_(\n [version.uuid for version in versions]\n )\n )\n .order_by(DeployedPrompt.step.asc())\n )\n # select version by ratio\n selected_version = select_version_by_ratio(\n [version.__data__ for version in versions]\n )\n selected_prompts = list(\n filter(\n lambda prompt: str(prompt.version_uuid.uuid)\n == str(selected_version[\"uuid\"]),\n prompts,\n )\n )\n\n version_details = {\n \"model\": selected_version[\"model\"],\n \"version\" : 
selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n return selected_prompts, version_details\n except Exception as e:\n logger.error(e)\n return None, None" }, { "identifier": "CacheManager", "path": "promptmodel/promptmodel_init.py", "snippet": "class CacheManager:\n _instance = None\n _lock = threading.Lock()\n\n def __new__(cls):\n with cls._lock:\n if cls._instance is None:\n instance = super(CacheManager, cls).__new__(cls)\n instance.last_update_time = 0 # to manage update frequency\n instance.update_interval = 60 * 60 * 6 # seconds, 6 hours\n instance.program_alive = True\n instance.background_tasks = []\n initialize_db()\n atexit.register(instance._terminate)\n asyncio.run(instance.update_cache()) # updae cache first synchronously\n instance.cache_thread = threading.Thread(\n target=instance._run_cache_loop\n )\n instance.cache_thread.daemon = True\n instance.cache_thread.start()\n cls._instance = instance\n return cls._instance\n\n def cache_update_background_task(self, config):\n asyncio.run(update_deployed_db(config))\n\n def _run_cache_loop(self):\n asyncio.run(self._update_cache_periodically())\n\n async def _update_cache_periodically(self):\n while True:\n await asyncio.sleep(self.update_interval) # Non-blocking sleep\n await self.update_cache()\n\n async def update_cache(self):\n # Current time\n current_time = time.time()\n config = read_config()\n\n if not config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n if \"project\" not in config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n if \"version\" not in config[\"project\"]:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n # Check if we need to update the cache\n if current_time - self.last_update_time > self.update_interval:\n # Update cache logic\n try:\n await update_deployed_db(config)\n except:\n # try once more\n await update_deployed_db(config)\n # Update the last update time\n self.last_update_time = current_time\n\n def _terminate(self):\n self.program_alive = False\n\n # async def cleanup_background_tasks(self):\n # for task in self.background_tasks:\n # if not task.done():\n # task.cancel()\n # try:\n # await task\n # except asyncio.CancelledError:\n # pass # 작업이 취소됨" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . 
directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "select_version_by_ratio", "path": "promptmodel/utils/random_utils.py", "snippet": "def select_version_by_ratio(versions):\n epsilon = 1e-10\n ratios = [version[\"ratio\"] for version in versions]\n\n if not abs(sum(ratios) - 1.0) <= epsilon:\n raise ValueError(f\"Sum of ratios must be 1.0, now {sum(ratios)}\")\n\n cumulative_ratios = []\n cumulative_sum = 0\n for ratio in ratios:\n cumulative_sum += ratio\n cumulative_ratios.append(cumulative_sum)\n\n random_value = random.random()\n for idx, cumulative_ratio in enumerate(cumulative_ratios):\n if random_value <= cumulative_ratio:\n return versions[idx]" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "num_tokens_for_messages_for_each", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_for_messages_for_each(\n messages: List[Dict[str, str]], model: str = \"gpt-3.5-turbo-0613\"\n) -> List[int]:\n processed_messages = [\n {**message, \"function_call\": str(message[\"function_call\"])}\n if \"function_call\" in message\n else message\n for message in messages\n ]\n processed_messages = [\n {**message, \"tool_calls\": str(message[\"tool_calls\"])}\n if \"tool_calls\" in message\n else message\n for message in processed_messages\n ]\n return [\n token_counter(model=model, messages=[message]) for message in processed_messages\n ]" }, { "identifier": "num_tokens_from_functions_input", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_from_functions_input(\n functions: Optional[List[Any]] = None, model=\"gpt-3.5-turbo-0613\"\n) -> int:\n \"\"\"Return the number of tokens used by a list of functions.\"\"\"\n if functions is None:\n return 0\n num_tokens = 0\n for function in functions:\n function_tokens = token_counter(model=model, text=function[\"name\"])\n function_tokens += token_counter(model=model, text=function[\"description\"])\n\n if \"parameters\" in function:\n parameters = function[\"parameters\"]\n if \"properties\" in parameters:\n for properties_key in parameters[\"properties\"]:\n function_tokens += token_counter(model=model, text=properties_key)\n v = parameters[\"properties\"][properties_key]\n for field in v:\n if field == \"type\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"type\"]\n )\n elif field == \"description\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"description\"]\n )\n elif field == \"enum\":\n function_tokens -= 3\n for o in v[\"enum\"]:\n function_tokens += 3\n function_tokens += token_counter(model=model, text=o)\n else:\n print(f\"Warning: not supported field {field}\")\n function_tokens += 11\n\n num_tokens += function_tokens\n\n num_tokens += 12\n return num_tokens" }, { 
"identifier": "update_dict", "path": "promptmodel/utils/output_utils.py", "snippet": "def update_dict(\n target: Dict[str, str],\n source: Dict[str, str],\n):\n for key, value in source.items():\n if value is not None:\n if key not in target:\n target[key] = value\n else:\n target[key] += value\n return target" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: 
Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "FunctionModelConfig", "path": "promptmodel/types/response.py", "snippet": "class FunctionModelConfig(BaseModel):\n \"\"\"Response Class for FunctionModel.get_config()\n prompts: List[Dict[str, Any]] = []\n each prompt can have role, content, name, function_call, and tool_calls\n version_detail: Dict[str, Any] = {}\n version_detail has \"model\", \"uuid\", \"parsing_type\" and \"output_keys\".\n model: str\n model name (e.g. \"gpt-3.5-turbo\")\n name: str\n name of the FunctionModel.\n version_uuid: str\n version uuid of the FunctionModel.\n version: int\n version id of the FunctionModel.\n parsing_type: Optional[str] = None\n parsing type of the FunctionModel.\n output_keys: Optional[List[str]] = None\n output keys of the FunctionModel.\n \"\"\"\n\n prompts: List[Dict[str, Any]]\n model: str\n name: str\n version_uuid: str\n version: int\n parsing_type: Optional[str] = None\n output_keys: Optional[List[str]] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "UnitConfig", "path": "promptmodel/types/response.py", "snippet": "class UnitConfig(BaseModel):\n \"\"\"Response Class for UnitLogger.get_config().\n Created after calling UnitLogger.log_start()\n name: str\n name of the UnitLogger.\n version_uuid: str\n version uuid of the UnitLogger.\n version: int\n version id of the UnitLogger.\n log_uuid: str\n log_uuid for current trace.\n \"\"\"\n\n name: str\n version_uuid: str\n log_uuid: str\n version: int" }, { "identifier": "PMDetail", "path": "promptmodel/types/response.py", "snippet": "class PMDetail(BaseModel):\n model: str\n name: str\n version_uuid: str\n version: int\n log_uuid: str" }, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" } ]
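The get_deployed_prompts snippet above routes traffic between deployed versions with select_version_by_ratio, a cumulative-sum weighted random draw over each version's ratio field. Below is a self-contained sketch of that selection rule; the two version records are made up for illustration.

import random
from typing import Any, Dict, List


def select_version_by_ratio(versions: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Same cumulative-ratio draw as in the snippet above: the ratios must sum
    # to 1.0, and a uniform random value selects the first version whose
    # cumulative ratio reaches it.
    epsilon = 1e-10
    ratios = [version["ratio"] for version in versions]
    if not abs(sum(ratios) - 1.0) <= epsilon:
        raise ValueError(f"Sum of ratios must be 1.0, now {sum(ratios)}")

    cumulative, cumulative_sum = [], 0.0
    for ratio in ratios:
        cumulative_sum += ratio
        cumulative.append(cumulative_sum)

    random_value = random.random()
    for idx, cumulative_ratio in enumerate(cumulative):
        if random_value <= cumulative_ratio:
            return versions[idx]


# Hypothetical A/B test: version 3 serves 70% of requests, version 4 serves 30%.
versions = [
    {"uuid": "ver-3", "version": 3, "ratio": 0.7},
    {"uuid": "ver-4", "version": 4, "ratio": 0.3},
]
picks = [select_version_by_ratio(versions)["version"] for _ in range(10)]
print(picks)  # mostly 3s, occasionally 4s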
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
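The synchronous wrappers in the code that follows lean on run_async_in_sync (imported above) to drive coroutines such as fetch_prompts and the cloud-logging calls from plain blocking code. A minimal sketch of that bridging pattern, using a stand-in coroutine rather than the real fetch:

import asyncio
from typing import Any, Coroutine, Dict, List, Tuple


def run_async_in_sync(coro: Coroutine) -> Any:
    # Same shape as the helper shown in the context: when called from plain
    # synchronous code there is no running loop, so a fresh event loop is
    # created and the coroutine is driven to completion on it.
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:  # no running event loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(coro)


async def fake_fetch_prompts(name: str) -> Tuple[List[Dict[str, str]], Dict[str, Any]]:
    # Hypothetical stand-in for LLMProxy.fetch_prompts, used only for this sketch.
    await asyncio.sleep(0)
    return [{"role": "system", "content": f"You are {name}."}], {"model": "gpt-3.5-turbo"}


prompts, version_detail = run_async_in_sync(fake_fetch_prompts("summarizer"))
print(prompts, version_detail)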
20045
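Both the streaming parsers in the context snippets and the proxy wrappers below fold partial parsed_outputs chunks into a single dictionary with update_dict, which concatenates string values when a key repeats across chunks. A minimal sketch of that aggregation; the chunk contents are invented for illustration.

from typing import Dict


def update_dict(target: Dict[str, str], source: Dict[str, str]) -> Dict[str, str]:
    # Same merge rule as the snippet above: concatenate values for keys that
    # already exist, add keys that do not.
    for key, value in source.items():
        if value is not None:
            if key not in target:
                target[key] = value
            else:
                target[key] += value
    return target


# Hypothetical parsed_outputs chunks as they might arrive from a stream.
chunks = [{"summary": "The quick "}, {"summary": "brown fox"}, {"keywords": "fox"}]

aggregated: Dict[str, str] = {}
for chunk in chunks:
    aggregated = update_dict(aggregated, chunk)

print(aggregated)  # {'summary': 'The quick brown fox', 'keywords': 'fox'}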
tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream)(inputs, **kwargs) def run_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run_and_parse)(inputs, **kwargs) def arun_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs) def stream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs) def astream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. 
Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ):
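A minimal usage sketch of the proxy wrappers shown above, assuming a FunctionModel named "extract_keywords" has already been deployed and API credentials are configured; the import path and the model name are illustrative assumptions, not taken from the source.

from typing import Any, Dict

# Import path is assumed for this sketch; adjust to wherever LLMProxy lives in your project.
from promptmodel.llms.llm_proxy import LLMProxy

# Hypothetical deployed FunctionModel name.
proxy = LLMProxy(name="extract_keywords", version="deploy")

inputs: Dict[str, Any] = {"document": "PromptModel keeps prompt versions out of the codebase."}

# Blocking call with parsing: parsed_outputs follows the deployed version's output_keys.
res = proxy.run_and_parse(inputs=inputs)
if not res.error:
    print(res.parsed_outputs)

# Streaming call: each chunk may carry raw_output and incremental parsed_outputs.
for chunk in proxy.stream_and_parse(inputs=inputs):
    if chunk.raw_output:
        print(chunk.raw_output, end="")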
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) ) return wrapper def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call async_gen with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) log_uuid = str(uuid4()) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None api_response: Optional[ModelResponse] = None async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item # # add string_cache in model_response # if api_response: # if "message" not in api_response.choices[0]: # api_response.choices[0].message = {} # if "content" not in api_response.choices[0].message: # api_response.choices[0].message["content"] = string_cache # api_response.choices[0].message["role"] = "assistant" metadata = { "error": error_occurs, "error_log": error_log, } await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], 
inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) # raise Exception("error_log") return wrapper def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) ) else: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) # messages, model, uuid = self._fetch_prompts() call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) else: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = 
llm_response.api_response log_uuid = str(uuid4()) run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( message=llm_response.api_response.choices[ 0 ].message.model_dump(), uuid=log_uuid, metadata=metadata, api_response=api_response, ) ], ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = llm_response.api_response log_uuid = str(uuid4()) await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=llm_response.api_response.choices[ 0 ].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) ) return wrapper def 
_wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) return wrapper def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. 
Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, } res = await AsyncAPIClient.execute( method="POST", path="/run_log", params={ "version_uuid": version_uuid, }, json=run_log_request_body, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]"); if self.unit_config: res_connect = await AsyncAPIClient.execute( method="POST", path="/unit/connect", json={ "unit_log_uuid": self.unit_config.log_uuid, "run_log_uuid": log_uuid, }, use_cli_key=False, ) if res_connect.status_code != 200: print(f"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]") return res async def _async_chat_log_to_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, chat_log_request_list: List[ChatLogRequest] = [], ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/chat_log", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, json=[r.model_dump() for r in chat_log_request_list], use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]") return res async def _async_make_session_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/make_session", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to make ChatSession in cloud: {res.json()}[/red]") return res def make_kwargs(self, **kwargs): res = {} for key, value in kwargs.items(): if value is not None: res[key] = value return res def run( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run)(inputs, **kwargs) def arun( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun)(inputs, **kwargs) def stream( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = 
None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream)(inputs, **kwargs) def astream( self, inputs: Optional[Dict[str, Any]] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream)(inputs, **kwargs) def run_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run_and_parse)(inputs, **kwargs) def arun_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs) def stream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs) def astream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. 
Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ):
cache_manager = CacheManager()
5
2023-10-09 03:35:44+00:00
24k
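The record above ends with promptmodel's LLMProxy code, whose _prepare_call_args_for_chat helper trims the chat history before calling the LLM: it keeps the system prompt at index 0 and repeatedly deletes the second-oldest message until the message tokens plus any function/tool schema tokens fit under the model's context limit (falling through to the call if only the system prompt remains). The following is a minimal, self-contained sketch of that strategy, not the library's own API: truncate_chat_messages and rough_token_count are illustrative names, and the character-based token estimate is a stand-in for the tokenizer-based counters (num_tokens_for_messages_for_each, get_max_tokens) used in the source.

# Minimal sketch (assumed names, not from the record above) of the truncation
# strategy implemented by _prepare_call_args_for_chat: when the chat history
# plus function/tool schema tokens would exceed the model's context window,
# drop the second-oldest message (index 1) repeatedly, always keeping the
# system prompt at index 0. Token counts here are a crude stand-in for the
# tokenizer-based counters used in the real code.

from typing import Dict, List, Tuple


def rough_token_count(message: Dict[str, str]) -> int:
    # Stand-in estimate: roughly one token per four characters of content.
    return max(1, len(message.get("content", "")) // 4)


def truncate_chat_messages(
    messages: List[Dict[str, str]],
    tool_tokens: int,
    model_max_tokens: int,
) -> Tuple[List[Dict[str, str]], int]:
    """Drop messages[1], then the new messages[1], ... until the total fits.

    messages[0] is assumed to be the system prompt and is never removed.
    Returns the (possibly shortened) history and the remaining overflow:
    0 if the history now fits, or a positive number if only the system
    prompt is left and the request will still exceed the limit.
    """
    per_message = [rough_token_count(m) for m in messages]
    overflow = sum(per_message) + tool_tokens - model_max_tokens
    while overflow > 0 and len(messages) > 1:
        overflow -= per_message[1]
        del messages[1]
        del per_message[1]
    return messages, max(overflow, 0)


if __name__ == "__main__":
    history = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "old question " * 200},
        {"role": "assistant", "content": "old answer " * 200},
        {"role": "user", "content": "latest question"},
    ]
    trimmed, overflow = truncate_chat_messages(
        history, tool_tokens=50, model_max_tokens=400
    )
    print(len(trimmed), "messages kept, overflow:", overflow)

As in the source, truncation proceeds oldest-first (after the system prompt) rather than newest-first, on the assumption that the most recent turns carry the context the model needs to answer.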
cambridgeltl/ClaPS
algs/greedy.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} 
Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. 
{{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = 
torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = 
datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if 
x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] 
for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a batch norm calibration object. Only used in\n testing (not training or validation).\n \"\"\"\n\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Optional[Any] = None,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n self.obj_func = obj_func\n self.logger = logger\n self.prompt_dataset = prompt_dataset\n\n self.bn_calibrator = BatchNormCalibrate() if use_bn_calibrator else None\n self.n_samples_bn_calibrator = n_samples_bn_calibrator\n\n @abc.abstractmethod\n def train(self, train_data: Iterable[Any]):\n raise NotImplementedError()\n\n def validate(self, val_dataset: Iterable[Any], best_str_list: List[str]) -> str:\n t_dataset = val_dataset\n if self.logger is not None:\n self.logger.info(\"total val dataset length: %s\", len(t_dataset))\n val_acc_list = []\n\n for prompt in best_str_list:\n n_correct = 0\n\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size,\n (batch_idx + 1) * self.eval_batch_size,\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n _, _, batch_acc = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n prompt,\n True,\n \"infer\",\n verbose=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", prompt)\n self.logger.info(\"final val acc: %s\", (n_correct / len(t_dataset)))\n val_acc_list.append(float(n_correct / len(t_dataset)))\n # best_prompt = best_str_list[np.argmax(val_acc_list)]\n max_acc = np.max(val_acc_list)\n indices = np.argwhere(val_acc_list == max_acc)\n last_index = indices[-1][0]\n best_prompt = best_str_list[last_index]\n if self.logger is not None:\n self.logger.info(\"val acc list: %s\", val_acc_list)\n self.logger.info(\"best prompt: %s\", best_prompt)\n self.logger.info(\"best prompt acc: %s\", np.max(val_acc_list))\n\n return best_prompt\n\n def test(\n self,\n test_dataset,\n best_prompt,\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n 
replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n best_prompt,\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n _,\n _,\n _,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n best_prompt,\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", best_prompt)\n self.logger.info(n_correct)\n self.logger.info(\"final test acc: %s\", (n_correct / len(t_dataset)))\n if return_logits:\n return n_correct / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return n_correct / len(t_dataset), None\n\n def manual(\n self,\n test_dataset: Iterable[Any],\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n for i in range(self.n_classes):\n test_I = [x for x in t_dataset if x[\"label\"] == i]\n if self.logger is not None:\n self.logger.info(\n \"total test dataset length: %s for class %s\", len(test_I), i\n )\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n sum_ece = 0\n sum_entropy = 0\n class_correct = collections.Counter((i, 0) for i in range(self.n_classes))\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n \"\",\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = 
self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n count_class,\n batch_ece,\n batch_entropy,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n \"\",\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n sum_ece += batch_ece * len(idx)\n sum_entropy += batch_entropy * len(idx)\n class_correct += count_class[0]\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n # print(count_class)\n torch.cuda.empty_cache()\n # print(class_correct)\n if self.logger is not None:\n self.logger.info(\n \"manual prompt test acc: %s\", (float(n_correct) / len(t_dataset))\n )\n self.logger.info(\"count class: %s\", class_correct)\n self.logger.info(\n \"manual prompt test ece percent: %s\",\n (float(sum_ece) / len(t_dataset) * 100),\n )\n self.logger.info(\n \"manual prompt test entropy: %s\", (float(sum_entropy) / len(t_dataset))\n )\n if return_logits:\n return float(n_correct) / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return float(n_correct) / len(t_dataset), None" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 
'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in 
self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. 
Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. 
Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. 
n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" } ]
import random
import numpy as np
from typing import Any, Optional
from rewards.text_classification_reward import (
    PromptedClassificationReward,
)
from utils.fsc_datasets import PromptedClassificationDataset
from .base_trainer import BaseTrainer
from utils.fsc_datasets import PromptedClassificationDataset
from rewards.text_classification_reward import PromptedClassificationReward
18,712
class GreedyTrainer(BaseTrainer):
    def __init__(
        self,
        obj_func: PromptedClassificationReward,
class GreedyTrainer(BaseTrainer):
    def __init__(
        self,
        obj_func: PromptedClassificationReward,
prompt_dataset: PromptedClassificationDataset,
3
2023-10-08 12:39:44+00:00
24k
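The row above ends with its gold next line, an integer snippet index, a timestamp, and a size bucket; the rows before and after it follow the same layout, and each context cell holds a list of entries with "identifier", "path", and "snippet" keys, as visible in the escaped JSON above. As a minimal sketch of how such a row might be consumed for next-line prediction, the Python below assumes (rather than knows) that the integer index points into the context list and that a prompt is formed by concatenating one retrieved snippet with the import block and the cropped code; all variable names are illustrative, not field names guaranteed by this dump.

# Sketch only: the prompt-construction recipe and variable names are assumptions.
# The {"identifier", "path", "snippet"} keys do appear in the context cells above.

def build_completion_prompt(context_snippets, gold_index, import_block, cropped_code):
    # Assumed: the integer index selects the relevant retrieved snippet.
    retrieved = context_snippets[gold_index]["snippet"]
    return "\n\n".join([retrieved, import_block, cropped_code])

def exact_match(predicted_line, gold_next_line):
    # Whitespace-insensitive comparison against the gold next line (one possible scoring choice).
    return predicted_line.strip() == gold_next_line.strip()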
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n\n self.cfg = cfg\n self.args = args\n\n self.idx = slam.idx\n self.c = slam.shared_c\n self.bound = slam.bound\n self.logger = slam.logger\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.renderer = slam.renderer\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.mapping_first_frame = slam.mapping_first_frame\n self.scene_id = slam.scene_id\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n \n \n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.device = cfg['mapping']['device']\n self.fix_high = cfg['mapping']['fix_high']\n self.eval_rec = cfg['meshing']['eval_rec']\n \n \n self.mesh_freq = cfg['mapping']['mesh_freq']\n self.ckpt_freq = cfg['mapping']['ckpt_freq']\n self.fix_color = cfg['mapping']['fix_color']\n self.mapping_pixels = cfg['mapping']['pixels']\n self.num_joint_iters = cfg['mapping']['iters']\n self.clean_mesh = cfg['meshing']['clean_mesh']\n self.every_frame = cfg['mapping']['every_frame']\n self.color_refine = cfg['mapping']['color_refine']\n self.w_color_loss = cfg['mapping']['w_color_loss']\n self.keyframe_every = cfg['mapping']['keyframe_every']\n self.high_iter_ratio = cfg['mapping']['high_iter_ratio']\n self.low_iter_ratio = cfg['mapping']['low_iter_ratio']\n self.mapping_window_size = cfg['mapping']['mapping_window_size']\n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame']\n self.no_log_on_first_frame = cfg['mapping']['no_log_on_first_frame']\n self.no_mesh_on_first_frame = cfg['mapping']['no_mesh_on_first_frame']\n self.frustum_feature_selection = cfg['mapping']['frustum_feature_selection']\n self.keyframe_selection_method = cfg['mapping']['keyframe_selection_method']\n self.save_selected_keyframes_info = cfg['mapping']['save_selected_keyframes_info']\n if self.save_selected_keyframes_info:\n self.selected_keyframes = {}\n\n\n self.keyframe_dict = []\n self.keyframe_list = []\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n if 'Demo' not in self.output: # disable this visualization in demo\n self.visualizer = Visualizer(freq=cfg['mapping']['vis_freq'], inside_freq=cfg['mapping']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'mapping_vis'), renderer=self.renderer,\n verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def get_mask_from_c2w(self, c2w, key, val_shape, depth_np):\n \"\"\"\n Frustum feature selection based on current camera pose and depth image.\n\n Args:\n c2w (tensor): camera pose of current frame.\n key (str): name of this feature grid.\n val_shape (tensor): shape of the grid.\n depth_np (numpy.array): depth image of current frame.\n\n Returns:\n mask (tensor): mask for selected optimizable feature.\n points (tensor): corresponding point coordinates.\n \"\"\"\n H, W, fx, fy, cx, cy, = self.H, self.W, 
self.fx, self.fy, self.cx, self.cy\n X, Y, Z = torch.meshgrid(torch.linspace(self.bound[0][0], self.bound[0][1], val_shape[2]),\n torch.linspace(self.bound[1][0], self.bound[1][1], val_shape[1]),\n torch.linspace(self.bound[2][0], self.bound[2][1], val_shape[0]))\n\n points = torch.stack([X, Y, Z], dim=-1).reshape(-1, 3)\n points_bak = points.clone()\n c2w = c2w.cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(points[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [points, ones], axis=1).reshape(-1, 4, 1)\n cam_cord_homo = w2c@homo_vertices\n cam_cord = cam_cord_homo[:, :3]\n K = np.array([[fx, .0, cx], [.0, fy, cy], [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n\n remap_chunk = int(3e4)\n depths = []\n for i in range(0, uv.shape[0], remap_chunk):\n depths += [cv2.remap(depth_np,\n uv[i:i+remap_chunk, 0],\n uv[i:i+remap_chunk, 1],\n interpolation=cv2.INTER_LINEAR)[:, 0].reshape(-1, 1)]\n depths = np.concatenate(depths, axis=0)\n\n edge = 0\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n\n # For ray with depth==0, fill it with maximum depth\n zero_mask = (depths == 0)\n depths[zero_mask] = np.max(depths)\n\n # depth test\n mask = mask & (0 <= -z[:, :, 0]) & (-z[:, :, 0] <= depths+0.5)\n mask = mask.reshape(-1)\n\n # add feature grid near cam center\n ray_o = c2w[:3, 3]\n ray_o = torch.from_numpy(ray_o).unsqueeze(0)\n\n dist = points_bak-ray_o\n dist = torch.sum(dist*dist, axis=1)\n mask2 = dist < 0.5*0.5\n mask2 = mask2.cpu().numpy()\n mask = mask | mask2\n\n points = points[mask]\n mask = mask.reshape(val_shape[2], val_shape[1], val_shape[0])\n return mask\n\n def keyframe_selection_overlap(self, gt_color, gt_depth, c2w, keyframe_dict, k, N_samples=16, pixels=100):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n c2w (tensor): camera to world matrix (3*4 or 4*4 both fine).\n keyframe_dict (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n N_samples (int, optional): number of samples/points per ray. Defaults to 16.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. 
Defaults to 100.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n rays_o, rays_d, gt_depth, gt_color = get_samples(\n 0, H, 0, W, pixels, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth = gt_depth.repeat(1, N_samples)\n t_vals = torch.linspace(0., 1., steps=N_samples).to(device)\n near = gt_depth*0.8\n far = gt_depth+0.5\n z_vals = near * (1.-t_vals) + far * (t_vals)\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples, 3]\n vertices = pts.reshape(-1, 3).cpu().numpy()\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_dict):\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(vertices[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [vertices, ones], axis=1).reshape(-1, 4, 1) # (N, 4)\n cam_cord_homo = w2c@homo_vertices # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n K = np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n edge = 20\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n mask = mask & (z[:, :, 0] < 0)\n mask = mask.reshape(-1)\n percent_inside = mask.sum()/uv.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n selected_keyframe_list = [dic['id']\n for dic in list_keyframe if dic['percent_inside'] > 0.00]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n return selected_keyframe_list\n \n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, 500)\n bound = self.bound\n rets = []\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n return ret\n\n def optimize_map(self, num_joint_iters, lr_factor, idx, cur_gt_color, cur_gt_depth, gt_cur_c2w, keyframe_dict, keyframe_list, tsdf_volume, cur_c2w):\n \"\"\"\n Mapping iterations. 
Sample pixels from selected keyframes,\n then optimize scene representation.\n\n Args:\n num_joint_iters (int): number of mapping iterations.\n lr_factor (float): the factor to times on current lr.\n idx (int): the index of current frame\n cur_gt_color (tensor): gt_color image of the current camera.\n cur_gt_depth (tensor): gt_depth image of the current camera.\n gt_cur_c2w (tensor): groundtruth camera to world matrix corresponding to current frame.\n keyframe_dict (list): list of keyframes info dictionary.\n keyframe_list (list): list ofkeyframe index.\n tsdf_volume (tensor): tsdf volume.\n cur_c2w (tensor): the estimated camera to world matrix of current frame. \n\n Returns:\n return None\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n c = self.c\n cfg = self.cfg\n device = self.device\n tsdf_bnds = self.tsdf_bnds.to(device)\n\n if len(keyframe_dict) == 0:\n optimize_frame = []\n else:\n if self.keyframe_selection_method == 'global':\n num = self.mapping_window_size-2\n optimize_frame = random_select(len(self.keyframe_dict)-1, num)\n elif self.keyframe_selection_method == 'overlap':\n num = self.mapping_window_size-2\n optimize_frame = self.keyframe_selection_overlap(\n cur_gt_color, cur_gt_depth, cur_c2w, keyframe_dict[:-1], num)\n\n # add the last keyframe and the current frame(use -1 to denote)\n oldest_frame = None\n if len(keyframe_list) > 0:\n optimize_frame = optimize_frame + [len(keyframe_list)-1]\n oldest_frame = min(optimize_frame)\n optimize_frame += [-1]\n\n if self.save_selected_keyframes_info:\n keyframes_info = []\n for id, frame in enumerate(optimize_frame):\n if frame != -1:\n frame_idx = keyframe_list[frame]\n tmp_gt_c2w = keyframe_dict[frame]['gt_c2w']\n tmp_est_c2w = keyframe_dict[frame]['est_c2w']\n else:\n frame_idx = idx\n tmp_gt_c2w = gt_cur_c2w\n tmp_est_c2w = cur_c2w\n keyframes_info.append(\n {'idx': frame_idx, 'gt_c2w': tmp_gt_c2w, 'est_c2w': tmp_est_c2w})\n self.selected_keyframes[idx] = keyframes_info\n\n pixs_per_image = self.mapping_pixels//len(optimize_frame)\n\n mlp_para_list = []\n decoders_para_list = []\n low_grid_para = []\n high_grid_para = []\n color_grid_para = []\n gt_depth_np = cur_gt_depth.cpu().numpy()\n if True:\n if self.frustum_feature_selection:\n masked_c_grad = {}\n mask_c2w = cur_c2w\n for key, val in c.items():\n if not self.frustum_feature_selection:\n val = Variable(val.to(device), requires_grad=True)\n c[key] = val\n if key == 'grid_low':\n low_grid_para.append(val)\n elif key == 'grid_high':\n high_grid_para.append(val)\n elif key == 'grid_color':\n color_grid_para.append(val)\n\n else:\n mask = self.get_mask_from_c2w(\n mask_c2w, key, val.shape[2:], gt_depth_np)\n mask = torch.from_numpy(mask).permute(2, 1, 0).unsqueeze(\n 0).unsqueeze(0).repeat(1, val.shape[1], 1, 1, 1)\n val = val.to(device)\n # val_grad is the optimizable part, other parameters will be fixed\n val_grad = val[mask].clone()\n val_grad = Variable(val_grad.to(\n device), requires_grad=True)\n masked_c_grad[key] = val_grad\n masked_c_grad[key+'mask'] = mask\n if key == 'grid_low':\n low_grid_para.append(val_grad)\n elif key == 'grid_high':\n high_grid_para.append(val_grad)\n elif key == 'grid_color':\n color_grid_para.append(val_grad)\n\n\n if not self.fix_high:\n decoders_para_list += list(\n self.decoders.high_decoder.parameters())\n if not self.fix_color:\n decoders_para_list += list(\n self.decoders.color_decoder.parameters())\n mlp_para_list += list(\n self.decoders.mlp.parameters())\n \n\n optimizer = 
torch.optim.Adam([{'params': decoders_para_list, 'lr': 0},\n {'params': mlp_para_list, 'lr': 0},\n {'params': low_grid_para, 'lr': 0},\n {'params': high_grid_para, 'lr': 0},\n {'params': color_grid_para, 'lr': 0}])\n \n\n for joint_iter in range(num_joint_iters):\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.to(device)\n val[mask] = val_grad\n c[key] = val\n\n if joint_iter <= int(num_joint_iters*self.low_iter_ratio):\n self.stage = 'low'\n elif joint_iter <= int(num_joint_iters*self.high_iter_ratio):\n self.stage = 'high'\n else:\n self.stage = 'color'\n\n optimizer.param_groups[0]['lr'] = cfg['mapping']['stage'][self.stage]['decoders_lr']*lr_factor\n optimizer.param_groups[1]['lr'] = cfg['mapping']['stage'][self.stage]['mlp_lr']*lr_factor\n optimizer.param_groups[2]['lr'] = cfg['mapping']['stage'][self.stage]['low_lr']*lr_factor\n optimizer.param_groups[3]['lr'] = cfg['mapping']['stage'][self.stage]['high_lr']*lr_factor\n optimizer.param_groups[4]['lr'] = cfg['mapping']['stage'][self.stage]['color_lr']*lr_factor\n \n if (not (idx == 0 and self.no_vis_on_first_frame)) and ('Demo' not in self.output):\n self.visualizer.vis(\n idx, joint_iter, cur_gt_depth, cur_gt_color, cur_c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n optimizer.zero_grad()\n batch_rays_d_list = []\n batch_rays_o_list = []\n batch_gt_depth_list = []\n batch_gt_color_list = []\n\n camera_tensor_id = 0\n for frame in optimize_frame:\n if frame != -1:\n gt_depth = keyframe_dict[frame]['depth'].to(device)\n gt_color = keyframe_dict[frame]['color'].to(device)\n c2w = keyframe_dict[frame]['est_c2w']\n\n else:\n gt_depth = cur_gt_depth.to(device)\n gt_color = cur_gt_color.to(device)\n c2w = cur_c2w\n\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n 0, H, 0, W, pixs_per_image, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n batch_rays_o_list.append(batch_rays_o.float())\n batch_rays_d_list.append(batch_rays_d.float())\n batch_gt_depth_list.append(batch_gt_depth.float())\n batch_gt_color_list.append(batch_gt_color.float())\n\n batch_rays_d = torch.cat(batch_rays_d_list)\n batch_rays_o = torch.cat(batch_rays_o_list)\n batch_gt_depth = torch.cat(batch_gt_depth_list)\n batch_gt_color = torch.cat(batch_gt_color_list)\n\n\n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(\n device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(c, self.decoders, batch_rays_d,\n batch_rays_o, device, tsdf_volume, tsdf_bnds, self.stage,\n batch_gt_depth)\n depth, uncertainty, color, weight = ret\n\n\n depth_mask = (batch_gt_depth > 0)\n \n if joint_iter > int(num_joint_iters*self.low_iter_ratio) and joint_iter <= int(num_joint_iters*self.low_iter_ratio)+5 and idx <= 1:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum() + torch.abs(weight-torch.ones(weight.shape).to(device)).sum()\n else:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum()\n \n if self.stage == 'color':\n 
color_loss = torch.abs(batch_gt_color - color).sum()\n weighted_color_loss = self.w_color_loss*color_loss\n loss += weighted_color_loss\n\n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n\n # put selected and updated features back to the grid\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.detach()\n val[mask] = val_grad.clone().detach()\n c[key] = val\n\n return None\n\n\n def run(self):\n cfg = self.cfg\n idx, gt_color, gt_depth, gt_c2w = self.frame_reader[0]\n\n self.estimate_c2w_list[0] = gt_c2w.cpu()\n init = True\n prev_idx = -1\n tsdf_volume = self.tsdf_volume_shared\n \n while (1):\n while True:\n idx = self.idx[0].clone()\n if idx == self.n_img-1:\n break\n if self.sync_method == 'strict':\n if idx % self.every_frame == 0 and idx != prev_idx:\n break\n elif self.sync_method == 'loose':\n if idx == 0 or idx >= prev_idx+self.every_frame//2:\n break\n elif self.sync_method == 'free':\n break\n time.sleep(0.1)\n prev_idx = idx\n\n if self.verbose:\n print(Fore.GREEN)\n prefix = ''\n print(prefix+\"Mapping Frame \", idx.item())\n print(Style.RESET_ALL)\n\n _, gt_color, gt_depth, gt_c2w = self.frame_reader[idx]\n\n # valid c2w\n valid_c2w = gt_c2w.clone().cpu().numpy()\n if not np.isfinite(valid_c2w).any():\n self.mapping_idx[0] = idx\n continue\n\n\n if not init:\n lr_factor = cfg['mapping']['lr_factor']\n num_joint_iters = cfg['mapping']['iters']\n\n # here provides a color refinement postprocess\n if idx == self.n_img-1 and self.color_refine:\n outer_joint_iters = 5\n self.mapping_window_size *= 2\n self.low_iter_ratio = 0.0\n self.high_iter_ratio = 0.0\n num_joint_iters *= 5\n self.fix_color = True\n self.frustum_feature_selection = False\n else:\n outer_joint_iters = 1\n \n\n else:\n outer_joint_iters = 1\n lr_factor = cfg['mapping']['lr_first_factor']\n num_joint_iters = cfg['mapping']['iters_first']\n\n cur_c2w = self.estimate_c2w_list[idx].to(self.device)\n num_joint_iters = num_joint_iters//outer_joint_iters\n \n for outer_joint_iter in range(outer_joint_iters):\n\n\n _ = self.optimize_map(num_joint_iters, lr_factor, idx, gt_color, gt_depth,\n gt_c2w, self.keyframe_dict, self.keyframe_list, tsdf_volume, cur_c2w=cur_c2w)\n \n\n # add new frame to keyframe set\n if outer_joint_iter == outer_joint_iters-1:\n if (idx % self.keyframe_every == 0 or (idx == self.n_img-2)) \\\n and (idx not in self.keyframe_list):\n self.keyframe_list.append(idx)\n self.keyframe_dict.append({'gt_c2w': gt_c2w.cpu(), 'idx': idx, 'color': gt_color.cpu(\n ), 'depth': gt_depth.cpu(), 'est_c2w': cur_c2w.clone()})\n\n if self.low_gpu_mem:\n torch.cuda.empty_cache()\n\n init = False\n # mapping of first frame is done, can begin tracking\n self.mapping_first_frame[0] = 1\n\n if True:\n if ((not (idx == 0 and self.no_log_on_first_frame)) and idx % self.ckpt_freq == 0) \\\n or idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n self.logger.log(idx, self.keyframe_dict, self.keyframe_list,\n selected_keyframes=self.selected_keyframes\n if self.save_selected_keyframes_info else None)\n\n self.mapping_idx[0] = idx\n self.mapping_cnt[0] += 1\n\n if (idx % self.mesh_freq == 0) and (not (idx == 0 and self.no_mesh_on_first_frame)):\n mesh_out_file = f'{self.output}/mesh/{idx:05d}_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, 
get_mask_use_all_frames=False)\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n mesh_out_file = f'{self.output}/mesh/final_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=False)\n os.system(\n f\"cp {mesh_out_file} {self.output}/mesh/{idx:05d}_mesh.ply\")\n if self.eval_rec:\n mesh_out_file = f'{self.output}/mesh/final_mesh_eval_rec.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict,\n self.estimate_c2w_list, idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=True)\n break\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n break" }, { "identifier": "Tracker", "path": "src/Tracker.py", "snippet": "class Tracker(object):\n def __init__(self, cfg, args, slam\n ):\n self.cfg = cfg\n self.args = args\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.idx = slam.idx\n self.bound = slam.bound\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.shared_c = slam.shared_c\n self.renderer = slam.renderer\n self.gt_c2w_list = slam.gt_c2w_list\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n self.cam_lr = cfg['tracking']['lr']\n self.device = cfg['tracking']['device']\n self.num_cam_iters = cfg['tracking']['iters']\n self.gt_camera = cfg['tracking']['gt_camera']\n self.tracking_pixels = cfg['tracking']['pixels']\n self.seperate_LR = cfg['tracking']['seperate_LR']\n self.w_color_loss = cfg['tracking']['w_color_loss']\n self.ignore_edge_W = cfg['tracking']['ignore_edge_W']\n self.ignore_edge_H = cfg['tracking']['ignore_edge_H']\n self.handle_dynamic = cfg['tracking']['handle_dynamic']\n self.use_color_in_tracking = cfg['tracking']['use_color_in_tracking']\n self.const_speed_assumption = cfg['tracking']['const_speed_assumption']\n\n self.every_frame = cfg['mapping']['every_frame'] \n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame'] # ori mapping\n\n self.prev_mapping_idx = -1\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n self.frame_loader = DataLoader(\n self.frame_reader, batch_size=1, shuffle=False, num_workers=1)\n self.visualizer = Visualizer(freq=cfg['tracking']['vis_freq'], inside_freq=cfg['tracking']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'vis' if 'Demo' in self.output else 'tracking_vis'),\n renderer=self.renderer, verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def optimize_cam_in_batch(self, camera_tensor, gt_color, gt_depth, batch_size, optimizer, tsdf_volume):\n \"\"\"\n Do one iteration of camera iteration. 
Sample pixels, render depth/color, calculate loss and backpropagation.\n\n Args:\n camera_tensor (tensor): camera tensor.\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n batch_size (int): batch size, number of sampling rays.\n optimizer (torch.optim): camera optimizer.\n tsdf_volume (tensor): tsdf volume\n\n Returns:\n loss (float): The value of loss.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n optimizer.zero_grad()\n c2w = get_camera_from_tensor(camera_tensor)\n tsdf_bnds = self.tsdf_bnds.to(device)\n Wedge = self.ignore_edge_W\n Hedge = self.ignore_edge_H\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n Hedge, H-Hedge, Wedge, W-Wedge, batch_size, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n \n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(\n self.c, self.decoders, batch_rays_d, batch_rays_o, self.device, tsdf_volume, tsdf_bnds, stage='color', gt_depth=batch_gt_depth) #color\n depth, uncertainty, color, _ = ret\n\n uncertainty = uncertainty.detach()\n if self.handle_dynamic:\n tmp = torch.abs(batch_gt_depth-depth)/torch.sqrt(uncertainty+1e-10)\n mask = (tmp < 10*tmp.median()) & (batch_gt_depth > 0)\n else:\n mask = batch_gt_depth > 0\n\n loss = (torch.abs(batch_gt_depth-depth) /\n torch.sqrt(uncertainty+1e-10))[mask].sum()\n\n if self.use_color_in_tracking:\n color_loss = torch.abs(\n batch_gt_color - color)[mask].sum()\n loss += self.w_color_loss*color_loss\n \n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n return loss.item()\n\n def update_para_from_mapping(self):\n \"\"\"\n Update the parameters of scene representation from the mapping thread.\n\n \"\"\"\n if self.mapping_idx[0] != self.prev_mapping_idx:\n if self.verbose:\n print('Tracking: update the parameters from mapping')\n self.decoders = copy.deepcopy(self.shared_decoders).to(self.device)\n for key, val in self.shared_c.items():\n val = val.clone().to(self.device)\n self.c[key] = val\n self.prev_mapping_idx = self.mapping_idx[0].clone()\n\n def run(self):\n device = self.device\n tsdf_volume = self.tsdf_volume_shared\n tsdf_bnds = self.tsdf_bnds.to(device)\n \n self.c = {}\n if self.verbose:\n pbar = self.frame_loader\n else:\n pbar = tqdm(self.frame_loader)\n\n for idx, gt_color, gt_depth, gt_c2w in pbar:\n if not self.verbose:\n pbar.set_description(f\"Tracking Frame {idx[0]}\")\n\n idx = idx[0]\n gt_depth = gt_depth[0]\n gt_color = gt_color[0]\n gt_c2w = gt_c2w[0]\n\n if self.sync_method == 'strict':\n # strictly mapping and then tracking\n # initiate mapping every self.every_frame frames\n if idx > 0 and (idx % self.every_frame == 1 or self.every_frame == 1):\n while self.mapping_idx[0] != idx-1:\n time.sleep(0.1)\n pre_c2w = self.estimate_c2w_list[idx-1].to(device)\n elif self.sync_method == 'loose':\n # mapping idx can be later than tracking idx is 
within the bound of\n # [-self.every_frame-self.every_frame//2, -self.every_frame+self.every_frame//2]\n while self.mapping_idx[0] < idx-self.every_frame-self.every_frame//2:\n time.sleep(0.1)\n elif self.sync_method == 'free':\n # pure parallel, if mesh/vis happens may cause inbalance\n pass\n\n self.update_para_from_mapping()\n\n if self.verbose:\n print(Fore.MAGENTA)\n print(\"Tracking Frame \", idx.item())\n print(Style.RESET_ALL)\n \n \n\n if idx == 0 or self.gt_camera:\n c2w = gt_c2w\n if not self.no_vis_on_first_frame:\n self.visualizer.vis(\n idx, 0, gt_depth, gt_color, c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n \n else:\n gt_camera_tensor = get_tensor_from_camera(gt_c2w)\n if self.const_speed_assumption and idx-2 >= 0:\n pre_c2w = pre_c2w.float()\n delta = [email protected]_c2w_list[idx-2].to(\n device).float().inverse()\n estimated_new_cam_c2w = delta@pre_c2w\n else:\n estimated_new_cam_c2w = pre_c2w\n\n camera_tensor = get_tensor_from_camera(\n estimated_new_cam_c2w.detach())\n if self.seperate_LR:\n camera_tensor = camera_tensor.to(device).detach()\n T = camera_tensor[-3:]\n quad = camera_tensor[:4]\n cam_para_list_quad = [quad]\n quad = Variable(quad, requires_grad=True)\n T = Variable(T, requires_grad=True)\n camera_tensor = torch.cat([quad, T], 0)\n cam_para_list_T = [T]\n cam_para_list_quad = [quad]\n optimizer_camera = torch.optim.Adam([{'params': cam_para_list_T, 'lr': self.cam_lr},\n {'params': cam_para_list_quad, 'lr': self.cam_lr*0.2}])\n else:\n camera_tensor = Variable(\n camera_tensor.to(device), requires_grad=True)\n cam_para_list = [camera_tensor]\n optimizer_camera = torch.optim.Adam(\n cam_para_list, lr=self.cam_lr)\n\n initial_loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n candidate_cam_tensor = None\n current_min_loss = 10000000000.\n\n \n\n for cam_iter in range(self.num_cam_iters):\n if self.seperate_LR:\n camera_tensor = torch.cat([quad, T], 0).to(self.device)\n\n self.visualizer.vis(\n idx, cam_iter, gt_depth, gt_color, camera_tensor, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n loss = self.optimize_cam_in_batch(\n camera_tensor, gt_color, gt_depth, self.tracking_pixels, optimizer_camera, tsdf_volume)\n\n if cam_iter == 0:\n initial_loss = loss\n\n loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n if self.verbose:\n if cam_iter == self.num_cam_iters-1:\n print(\n f'Re-rendering loss: {initial_loss:.2f}->{loss:.2f} ' +\n f'camera tensor error: {initial_loss_camera_tensor:.4f}->{loss_camera_tensor:.4f}')\n if loss < current_min_loss:\n current_min_loss = loss\n candidate_cam_tensor = camera_tensor.clone().detach()\n bottom = torch.from_numpy(np.array([0, 0, 0, 1.]).reshape(\n [1, 4])).type(torch.float32).to(self.device)\n c2w = get_camera_from_tensor(\n candidate_cam_tensor.clone().detach())\n c2w = torch.cat([c2w, bottom], dim=0)\n\n \n self.estimate_c2w_list[idx] = c2w.clone().cpu()\n self.gt_c2w_list[idx] = gt_c2w.clone().cpu()\n pre_c2w = c2w.clone()\n self.idx[0] = idx\n if self.low_gpu_mem:\n torch.cuda.empty_cache()" }, { "identifier": "get_dataset", "path": "src/utils/datasets.py", "snippet": "def get_dataset(cfg, args, scale, device='cuda:0'):\n return dataset_dict[cfg['dataset']](cfg, args, scale, device=device)" }, { "identifier": "Logger", "path": "src/utils/Logger.py", "snippet": "class Logger(object):\n \"\"\"\n Save checkpoints to file.\n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n self.verbose = slam.verbose\n self.ckptsdir = 
slam.ckptsdir\n self.shared_c = slam.shared_c\n self.gt_c2w_list = slam.gt_c2w_list\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.tsdf_volume = slam.tsdf_volume_shared\n\n def log(self, idx, keyframe_dict, keyframe_list, selected_keyframes=None):\n path = os.path.join(self.ckptsdir, '{:05d}.tar'.format(idx))\n torch.save({\n 'c': self.shared_c,\n 'decoder_state_dict': self.shared_decoders.state_dict(),\n 'gt_c2w_list': self.gt_c2w_list,\n 'estimate_c2w_list': self.estimate_c2w_list,\n 'keyframe_list': keyframe_list,\n 'keyframe_dict': keyframe_dict, # to save keyframe_dict into ckpt, uncomment this line\n 'selected_keyframes': selected_keyframes,\n 'idx': idx,\n 'tsdf_volume': self.tsdf_volume,\n }, path, _use_new_zipfile_serialization=False)\n\n if self.verbose:\n print('Saved checkpoints at', path)" }, { "identifier": "Mesher", "path": "src/utils/Mesher.py", "snippet": "class Mesher(object):\n\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n \"\"\"\n Mesher class, given a scene representation, the mesher extracts the mesh from it.\n\n Args:\n cfg (dict): parsed config dict.\n args (class 'argparse.Namespace'): argparse arguments.\n slam (class DF_Prior): DF_Prior main class.\n points_batch_size (int): maximum points size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 500000.\n ray_batch_size (int): maximum ray size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 100000.\n \"\"\"\n self.points_batch_size = points_batch_size\n self.ray_batch_size = ray_batch_size\n self.renderer = slam.renderer\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n \n self.resolution = cfg['meshing']['resolution']\n self.level_set = cfg['meshing']['level_set']\n self.clean_mesh_bound_scale = cfg['meshing']['clean_mesh_bound_scale']\n self.remove_small_geometry_threshold = cfg['meshing']['remove_small_geometry_threshold']\n self.color_mesh_extraction_method = cfg['meshing']['color_mesh_extraction_method']\n self.get_largest_components = cfg['meshing']['get_largest_components']\n self.depth_test = cfg['meshing']['depth_test']\n \n self.bound = slam.bound\n self.verbose = slam.verbose\n \n\n self.marching_cubes_bound = torch.from_numpy(\n np.array(cfg['mapping']['marching_cubes_bound']) * self.scale)\n\n self.frame_reader = get_dataset(cfg, args, self.scale, device='cpu')\n self.n_img = len(self.frame_reader)\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n\n def point_masks(self, input_points, keyframe_dict, estimate_c2w_list,\n idx, device, get_mask_use_all_frames=False):\n \"\"\"\n Split the input points into seen, unseen, and forcast,\n according to the estimated camera pose and depth image.\n\n Args:\n input_points (tensor): input points.\n keyframe_dict (list): list of keyframe info dictionary.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current frame index.\n device (str): device name to compute on.\n\n Returns:\n seen_mask (tensor): the mask for seen area.\n forecast_mask (tensor): the mask for forecast area.\n unseen_mask (tensor): the mask for unseen area.\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n if not isinstance(input_points, torch.Tensor):\n input_points = torch.from_numpy(input_points)\n input_points = input_points.clone().detach()\n 
seen_mask_list = []\n forecast_mask_list = []\n unseen_mask_list = []\n for i, pnts in enumerate(\n torch.split(input_points, self.points_batch_size, dim=0)):\n points = pnts.to(device).float()\n # should divide the points into three parts, seen and forecast and unseen\n # seen: union of all the points in the viewing frustum of keyframes\n # forecast: union of all the points in the extended edge of the viewing frustum of keyframes\n # unseen: all the other points\n\n seen_mask = torch.zeros((points.shape[0])).bool().to(device)\n forecast_mask = torch.zeros((points.shape[0])).bool().to(device)\n if get_mask_use_all_frames:\n for i in range(0, idx + 1, 1):\n c2w = estimate_c2w_list[i].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float() # (N, 4)\n # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n else:\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float()\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3]\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n if self.depth_test:\n gt_depth = keyframe['depth'].to(\n device).reshape(1, 1, H, W)\n vgrid = uv.reshape(1, 1, -1, 2)\n # normalized to [-1, 1]\n vgrid[..., 0] = (vgrid[..., 0] / (W-1) * 2.0 - 1.0)\n vgrid[..., 1] = (vgrid[..., 1] / (H-1) * 2.0 - 1.0)\n depth_sample = F.grid_sample(\n gt_depth, vgrid, padding_mode='zeros', align_corners=True)\n depth_sample = depth_sample.reshape(-1)\n max_depth = torch.max(depth_sample)\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - 
cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone()] &= \\\n (proj_depth_seen < depth_sample[cur_mask_seen]+2.4) \\\n & (depth_sample[cur_mask_seen]-2.4 < proj_depth_seen)\n else:\n max_depth = torch.max(keyframe['depth'])*1.1\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[\n cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - \\\n cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone(\n )] &= proj_depth_seen < max_depth\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n\n forecast_mask &= ~seen_mask\n unseen_mask = ~(seen_mask | forecast_mask)\n\n seen_mask = seen_mask.cpu().numpy()\n forecast_mask = forecast_mask.cpu().numpy()\n unseen_mask = unseen_mask.cpu().numpy()\n\n seen_mask_list.append(seen_mask)\n forecast_mask_list.append(forecast_mask)\n unseen_mask_list.append(unseen_mask)\n\n seen_mask = np.concatenate(seen_mask_list, axis=0)\n forecast_mask = np.concatenate(forecast_mask_list, axis=0)\n unseen_mask = np.concatenate(unseen_mask_list, axis=0)\n return seen_mask, forecast_mask, unseen_mask\n\n def get_bound_from_frames(self, keyframe_dict, scale=1):\n \"\"\"\n Get the scene bound (convex hull),\n using sparse estimated camera poses and corresponding depth images.\n\n Args:\n keyframe_dict (list): list of keyframe info dictionary.\n scale (float): scene scale.\n\n Returns:\n return_mesh (trimesh.Trimesh): the convex hull.\n \"\"\"\n\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n # for new version as provided in environment.yaml\n volume = o3d.pipelines.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)\n else:\n # for lower version\n volume = o3d.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.integration.TSDFVolumeColorType.RGB8)\n cam_points = []\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n # convert to open3d camera pose\n c2w[:3, 1] *= -1.0\n c2w[:3, 2] *= -1.0\n w2c = np.linalg.inv(c2w)\n cam_points.append(c2w[:3, 3])\n depth = keyframe['depth'].cpu().numpy()\n color = keyframe['color'].cpu().numpy()\n\n depth = o3d.geometry.Image(depth.astype(np.float32))\n color = o3d.geometry.Image(np.array(\n (color * 255).astype(np.uint8)))\n\n intrinsic = o3d.camera.PinholeCameraIntrinsic(W, H, fx, fy, cx, cy)\n rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(\n color,\n depth,\n depth_scale=1,\n depth_trunc=1000,\n convert_rgb_to_intensity=False)\n volume.integrate(rgbd, intrinsic, w2c)\n\n cam_points = np.stack(cam_points, axis=0)\n mesh = volume.extract_triangle_mesh()\n mesh_points = np.array(mesh.vertices)\n points = np.concatenate([cam_points, mesh_points], axis=0)\n o3d_pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))\n mesh, _ = o3d_pc.compute_convex_hull()\n mesh.compute_vertex_normals()\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n mesh = mesh.scale(self.clean_mesh_bound_scale, mesh.get_center())\n else:\n mesh = mesh.scale(self.clean_mesh_bound_scale, center=True)\n points = np.array(mesh.vertices)\n faces = np.array(mesh.triangles)\n return_mesh = 
trimesh.Trimesh(vertices=points, faces=faces)\n return return_mesh\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n\n return ret\n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n tsdf_volume (tensor): tsdf volume.\n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def get_grid_uniform(self, resolution):\n \"\"\"\n Get query point coordinates for marching cubes.\n\n Args:\n resolution (int): marching cubes resolution.\n\n Returns:\n (dict): points coordinates and sampled coordinates for each axis.\n \"\"\"\n bound = self.marching_cubes_bound\n\n padding = 0.05\n x = np.linspace(bound[0][0] - padding, bound[0][1] + padding,\n resolution)\n y = np.linspace(bound[1][0] - padding, bound[1][1] + padding,\n resolution)\n z = np.linspace(bound[2][0] - padding, bound[2][1] + padding,\n resolution)\n\n xx, yy, zz = np.meshgrid(x, y, z)\n grid_points = np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T\n grid_points = torch.tensor(np.vstack(\n [xx.ravel(), yy.ravel(), zz.ravel()]).T,\n dtype=torch.float)\n\n\n\n return {\"grid_points\": grid_points, \"xyz\": [x, y, z]}\n\n def get_mesh(self,\n mesh_out_file,\n c,\n decoders,\n keyframe_dict,\n estimate_c2w_list,\n idx,\n tsdf_volume,\n device='cuda:0',\n color=True,\n clean_mesh=True,\n get_mask_use_all_frames=False):\n \"\"\"\n Extract mesh from scene representation and save mesh to file.\n\n 
Args:\n mesh_out_file (str): output mesh filename.\n c (dicts): feature grids.\n decoders (nn.module): decoders.\n keyframe_dict (list): list of keyframe info.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current processed camera ID.\n tsdf volume (tensor): tsdf volume.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n color (bool, optional): whether to extract colored mesh. Defaults to True.\n clean_mesh (bool, optional): whether to clean the output mesh \n (remove outliers outside the convexhull and small geometry noise). \n Defaults to True.\n get_mask_use_all_frames (bool, optional): \n whether to use all frames or just keyframes when getting the seen/unseen mask. Defaults to False.\n \"\"\"\n with torch.no_grad():\n\n grid = self.get_grid_uniform(self.resolution) \n points = grid['grid_points']\n points = points.to(device)\n eval_tsdf_volume = tsdf_volume\n\n mesh_bound = self.get_bound_from_frames(\n keyframe_dict, self.scale)\n z = []\n mask = []\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n mask.append(mesh_bound.contains(pnts.cpu().numpy()))\n mask = np.concatenate(mask, axis=0)\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n eval_tsdf = self.eval_points_tsdf(pnts, eval_tsdf_volume, device)\n eval_tsdf_mask = ((eval_tsdf > -1.0+1e-4) & (eval_tsdf < 1.0-1e-4)).cpu().numpy()\n ret = self.eval_points(pnts, decoders, tsdf_volume, self.tsdf_bnds, c, 'high', device)\n ret = ret.cpu().numpy()[:, -1]\n\n eval_tsdf_mask = eval_tsdf_mask.reshape(ret.shape)\n z.append(ret)\n \n z = np.concatenate(z, axis=0)\n z[~mask] = 100\n z = z.astype(np.float32)\n\n z_uni_m = z.reshape(\n grid['xyz'][1].shape[0], grid['xyz'][0].shape[0],\n grid['xyz'][2].shape[0]).transpose([1, 0, 2])\n\n print('begin marching cube...')\n combine_occ_tsdf = z_uni_m\n\n try:\n if version.parse(\n skimage.__version__) > version.parse('0.15.0'):\n # for new version as provided in environment.yaml\n verts, faces, normals, values = skimage.measure.marching_cubes(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n else:\n # for lower version\n verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n except:\n print(\n 'marching_cubes error. 
Possibly no surface extracted from the level set.'\n )\n return\n\n # convert back to world coordinates\n vertices = verts + np.array(\n [grid['xyz'][0][0], grid['xyz'][1][0], grid['xyz'][2][0]])\n\n if clean_mesh:\n points = vertices\n mesh = trimesh.Trimesh(vertices=vertices,\n faces=faces,\n process=False)\n seen_mask, _, unseen_mask = self.point_masks(\n points, keyframe_dict, estimate_c2w_list, idx, device=device, \n get_mask_use_all_frames=get_mask_use_all_frames)\n unseen_mask = ~seen_mask\n face_mask = unseen_mask[mesh.faces].all(axis=1)\n mesh.update_faces(~face_mask)\n\n # get connected components\n components = mesh.split(only_watertight=False)\n if self.get_largest_components:\n areas = np.array([c.area for c in components], dtype=np.float)\n mesh = components[areas.argmax()]\n else:\n new_components = []\n for comp in components:\n if comp.area > self.remove_small_geometry_threshold * self.scale * self.scale:\n new_components.append(comp)\n mesh = trimesh.util.concatenate(new_components)\n vertices = mesh.vertices\n faces = mesh.faces\n\n if color:\n if self.color_mesh_extraction_method == 'direct_point_query':\n # color is extracted by passing the coordinates of mesh vertices through the network\n points = torch.from_numpy(vertices)\n z = []\n for i, pnts in enumerate(\n torch.split(points, self.points_batch_size, dim=0)):\n ret = self.eval_points(\n pnts.to(device).float(), decoders, tsdf_volume, self.tsdf_bnds, c, 'color',\n device)\n z_color = ret.cpu()[..., :3]\n z.append(z_color)\n z = torch.cat(z, axis=0)\n vertex_colors = z.numpy()\n\n vertex_colors = np.clip(vertex_colors, 0, 1) * 255\n vertex_colors = vertex_colors.astype(np.uint8)\n\n\n else:\n vertex_colors = None\n\n vertices /= self.scale\n mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)\n mesh.export(mesh_out_file)\n if self.verbose:\n print('Saved mesh at', mesh_out_file)\n\n return z_uni_m" }, { "identifier": "Renderer", "path": "src/utils/Renderer.py", "snippet": "class Renderer(object):\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n self.ray_batch_size = ray_batch_size\n self.points_batch_size = points_batch_size\n\n self.lindisp = cfg['rendering']['lindisp']\n self.perturb = cfg['rendering']['perturb']\n self.N_samples = cfg['rendering']['N_samples']\n self.N_surface = cfg['rendering']['N_surface']\n self.N_importance = cfg['rendering']['N_importance']\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.bound = slam.bound\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.vol_bnds\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.resolution = cfg['meshing']['resolution']\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n decoders (nn.module decoders): Decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): Feature grids. Defaults to None.\n stage (str, optional): Query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): CUDA device. 
Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n weights = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, w = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n ret = ret.squeeze(0)\n\n\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100 \n rets.append(ret)\n weights.append(w)\n\n ret = torch.cat(rets, dim=0)\n weight = torch.cat(weights, dim=0)\n\n return ret, weight \n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n \n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def render_batch_ray(self, c, decoders, rays_d, rays_o, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Render color, depth and uncertainty of a batch of rays.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n rays_d (tensor, N*3): rays direction.\n rays_o (tensor, N*3): rays origin.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. 
Defaults to None.\n\n Returns:\n depth (tensor): rendered depth.\n uncertainty (tensor): rendered uncertainty.\n color (tensor): rendered color.\n weight (tensor): attention weight.\n \"\"\"\n eval_tsdf_volume = tsdf_volume\n \n\n N_samples = self.N_samples\n N_surface = self.N_surface\n N_importance = self.N_importance\n\n N_rays = rays_o.shape[0]\n\n if gt_depth is None:\n N_surface = 0\n near = 0.01\n else:\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth_samples = gt_depth.repeat(1, N_samples)\n near = gt_depth_samples*0.01\n\n with torch.no_grad():\n det_rays_o = rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device) -\n det_rays_o)/det_rays_d # (N, 3, 2)\n far_bb, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n far_bb = far_bb.unsqueeze(-1)\n far_bb += 0.01\n\n if gt_depth is not None:\n # in case the bound is too large\n far = torch.clamp(far_bb, 0, torch.max(gt_depth*1.2))\n\n else:\n far = far_bb\n if N_surface > 0:\n if False:\n # this naive implementation downgrades performance\n gt_depth_surface = gt_depth.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).to(device)\n z_vals_surface = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n else:\n # since we want to colorize even on regions with no depth sensor readings,\n # meaning colorize on interpolated geometry region,\n # we sample all pixels (not using depth mask) for color loss.\n # Therefore, for pixels with non-zero depth value, we sample near the surface,\n # since it is not a good idea to sample 16 points near (half even behind) camera,\n # for pixels with zero depth value, we sample uniformly from camera to max_depth.\n gt_none_zero_mask = gt_depth > 0\n gt_none_zero = gt_depth[gt_none_zero_mask]\n gt_none_zero = gt_none_zero.unsqueeze(-1)\n gt_depth_surface = gt_none_zero.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).double().to(device)\n # emperical range 0.05*depth\n z_vals_surface_depth_none_zero = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n z_vals_surface = torch.zeros(\n gt_depth.shape[0], N_surface).to(device).double()\n gt_none_zero_mask = gt_none_zero_mask.squeeze(-1)\n z_vals_surface[gt_none_zero_mask,\n :] = z_vals_surface_depth_none_zero\n near_surface = 0.001\n far_surface = torch.max(gt_depth)\n z_vals_surface_depth_zero = near_surface * \\\n (1.-t_vals_surface) + far_surface * (t_vals_surface)\n z_vals_surface_depth_zero.unsqueeze(\n 0).repeat((~gt_none_zero_mask).sum(), 1)\n z_vals_surface[~gt_none_zero_mask,\n :] = z_vals_surface_depth_zero\n\n t_vals = torch.linspace(0., 1., steps=N_samples, device=device)\n\n if not self.lindisp:\n z_vals = near * (1.-t_vals) + far * (t_vals)\n else:\n z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))\n\n if self.perturb > 0.:\n # get intervals between samples\n mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n upper = torch.cat([mids, z_vals[..., -1:]], -1)\n lower = torch.cat([z_vals[..., :1], mids], -1)\n # stratified samples in those intervals\n t_rand = torch.rand(z_vals.shape).to(device)\n z_vals = lower + (upper - lower) * t_rand\n\n if N_surface > 0:\n z_vals, _ = torch.sort(\n torch.cat([z_vals, z_vals_surface.double()], -1), -1)\n\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples+N_surface, 3]\n pointsf = pts.reshape(-1, 3)\n \n 
raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n \n if N_importance > 0:\n z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n z_samples = sample_pdf(\n z_vals_mid, weights[..., 1:-1], N_importance, det=(self.perturb == 0.), device=device)\n z_samples = z_samples.detach()\n z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)\n\n pts = rays_o[..., None, :] + \\\n rays_d[..., None, :] * z_vals[..., :, None]\n pts = pts.reshape(-1, 3)\n \n raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n return depth, uncertainty, color, weight\n\n\n return depth, uncertainty, color, weight\n\n\n def render_img(self, c, decoders, c2w, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Renders out depth, uncertainty, and color images.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n c2w (tensor): camera to world matrix of current frame.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. Defaults to None.\n\n Returns:\n depth (tensor, H*W): rendered depth image.\n uncertainty (tensor, H*W): rendered uncertainty image.\n color (tensor, H*W*3): rendered color image.\n \"\"\"\n \n with torch.no_grad():\n H = self.H\n W = self.W\n rays_o, rays_d = get_rays(\n H, W, self.fx, self.fy, self.cx, self.cy, c2w, device)\n rays_o = rays_o.reshape(-1, 3)\n rays_d = rays_d.reshape(-1, 3)\n\n depth_list = []\n uncertainty_list = []\n color_list = []\n\n\n ray_batch_size = self.ray_batch_size\n gt_depth = gt_depth.reshape(-1)\n\n for i in range(0, rays_d.shape[0], ray_batch_size):\n rays_d_batch = rays_d[i:i+ray_batch_size]\n rays_o_batch = rays_o[i:i+ray_batch_size]\n\n iter = 10\n\n if gt_depth is None:\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None)\n else:\n gt_depth_batch = gt_depth[i:i+ray_batch_size]\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=gt_depth_batch)\n\n depth, uncertainty, color, _= ret\n\n \n depth_list.append(depth.double())\n uncertainty_list.append(uncertainty.double())\n color_list.append(color)\n \n \n\n\n\n depth = torch.cat(depth_list, dim=0)\n uncertainty = torch.cat(uncertainty_list, dim=0)\n color = torch.cat(color_list, dim=0)\n \n depth = depth.reshape(H, W)\n uncertainty = uncertainty.reshape(H, W)\n color = color.reshape(H, W, 3)\n\n return depth, uncertainty, color " } ]
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20088
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass
self.frame_reader = get_dataset(cfg, args, self.scale)
3
2023-10-13 00:49:57+00:00
24k
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/naive_bayes.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p\n for p in init_signature.parameters.values()\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\" % (cls, init_signature)\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key)\n if deep and hasattr(value, \"get_params\") and not isinstance(value, type):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\n parameters of the form ``<component>__<parameter>`` so that it's\n possible to update each component of a nested object.\n\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if key not in valid_params:\n local_valid_params = self._get_param_names()\n raise ValueError(\n f\"Invalid parameter {key!r} for estimator {self}. \"\n f\"Valid parameters are: {local_valid_params!r}.\"\n )\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n # TODO(1.4): remove specific handling of \"base_estimator\".\n # The \"base_estimator\" key is special. It was deprecated and\n # renamed to \"estimator\" for several estimators. 
This means we\n # need to translate it here and set sub-parameters on \"estimator\",\n # but only if the user did not explicitly set a value for\n # \"base_estimator\".\n if (\n key == \"base_estimator\"\n and valid_params[key] == \"deprecated\"\n and self.__module__.startswith(\"sklearn.\")\n ):\n warnings.warn(\n (\n f\"Parameter 'base_estimator' of {self.__class__.__name__} is\"\n \" deprecated in favor of 'estimator'. See\"\n f\" {self.__class__.__name__}'s docstring for more details.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n key = \"estimator\"\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def __sklearn_clone__(self):\n return _clone_parametrized(self)\n\n def __repr__(self, N_CHAR_MAX=700):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. We pass it as an optional parameter to ease\n # the tests.\n\n from .utils._pprint import _EstimatorPrettyPrinter\n\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\n\n # use ellipsis for sequences with a lot of elements\n pp = _EstimatorPrettyPrinter(\n compact=True,\n indent=1,\n indent_at_name=True,\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,\n )\n\n repr_ = pp.pformat(self)\n\n # Use bruteforce ellipsis when there are a lot of non-blank characters\n n_nonblank = len(\"\".join(repr_.split()))\n if n_nonblank > N_CHAR_MAX:\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r\"^(\\s*\\S){%d}\" % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, repr_).end()\n right_lim = re.match(regex, repr_[::-1]).end()\n\n if \"\\n\" in repr_[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r\"[^\\n]*\\n\"\n right_lim = re.match(regex, repr_[::-1]).end()\n\n ellipsis = \"...\"\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n repr_ = repr_[:left_lim] + \"...\" + repr_[-right_lim:]\n\n return repr_\n\n def __getstate__(self):\n if getattr(self, \"__slots__\", None):\n raise TypeError(\n \"You cannot use `__slots__` in objects inheriting from \"\n \"`sklearn.base.BaseEstimator`.\"\n )\n\n try:\n state = super().__getstate__()\n if state is None:\n # For Python 3.11+, empty instance (no `__slots__`,\n # and `__dict__`) will return a state equal to `None`.\n state = self.__dict__.copy()\n except AttributeError:\n # Python < 3.11\n state = self.__dict__.copy()\n\n if type(self).__module__.startswith(\"sklearn.\"):\n return dict(state.items(), _sklearn_version=__version__)\n else:\n return state\n\n def __setstate__(self, state):\n if type(self).__module__.startswith(\"sklearn.\"):\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\n if pickle_version != __version__:\n warnings.warn(\n InconsistentVersionWarning(\n estimator_name=self.__class__.__name__,\n current_sklearn_version=__version__,\n original_sklearn_version=pickle_version,\n ),\n )\n try:\n super().__setstate__(state)\n except AttributeError:\n 
self.__dict__.update(state)\n\n def _more_tags(self):\n return _DEFAULT_TAGS\n\n def _get_tags(self):\n collected_tags = {}\n for base_class in reversed(inspect.getmro(self.__class__)):\n if hasattr(base_class, \"_more_tags\"):\n # need the if because mixins might not have _more_tags\n # but might do redundant work in estimators\n # (i.e. calling more tags on BaseEstimator multiple times)\n more_tags = base_class._more_tags(self)\n collected_tags.update(more_tags)\n return collected_tags\n\n def _check_n_features(self, X, reset):\n \"\"\"Set the `n_features_in_` attribute, or check against it.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n reset : bool\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\n If False and the attribute exists, then check that it is equal to\n `X.shape[1]`. If False and the attribute does *not* exist, then\n the check is skipped.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n try:\n n_features = _num_features(X)\n except TypeError as e:\n if not reset and hasattr(self, \"n_features_in_\"):\n raise ValueError(\n \"X does not contain any features, but \"\n f\"{self.__class__.__name__} is expecting \"\n f\"{self.n_features_in_} features\"\n ) from e\n # If the number of features is not defined and reset=True,\n # then we skip this check\n return\n\n if reset:\n self.n_features_in_ = n_features\n return\n\n if not hasattr(self, \"n_features_in_\"):\n # Skip this check if the expected number of expected input features\n # was not recorded by calling fit first. This is typically the case\n # for stateless transformers.\n return\n\n if n_features != self.n_features_in_:\n raise ValueError(\n f\"X has {n_features} features, but {self.__class__.__name__} \"\n f\"is expecting {self.n_features_in_} features as input.\"\n )\n\n def _check_feature_names(self, X, *, reset):\n \"\"\"Set or check the `feature_names_in_` attribute.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n X : {ndarray, dataframe} of shape (n_samples, n_features)\n The input samples.\n\n reset : bool\n Whether to reset the `feature_names_in_` attribute.\n If False, the input will be checked for consistency with\n feature names of data provided when reset was last True.\n .. note::\n It is recommended to call `reset=True` in `fit` and in the first\n call to `partial_fit`. 
All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n\n if reset:\n feature_names_in = _get_feature_names(X)\n if feature_names_in is not None:\n self.feature_names_in_ = feature_names_in\n elif hasattr(self, \"feature_names_in_\"):\n # Delete the attribute when the estimator is fitted on a new dataset\n # that has no feature names.\n delattr(self, \"feature_names_in_\")\n return\n\n fitted_feature_names = getattr(self, \"feature_names_in_\", None)\n X_feature_names = _get_feature_names(X)\n\n if fitted_feature_names is None and X_feature_names is None:\n # no feature names seen in fit and in X\n return\n\n if X_feature_names is not None and fitted_feature_names is None:\n warnings.warn(\n f\"X has feature names, but {self.__class__.__name__} was fitted without\"\n \" feature names\"\n )\n return\n\n if X_feature_names is None and fitted_feature_names is not None:\n warnings.warn(\n \"X does not have valid feature names, but\"\n f\" {self.__class__.__name__} was fitted with feature names\"\n )\n return\n\n # validate the feature names against the `feature_names_in_` attribute\n if len(fitted_feature_names) != len(X_feature_names) or np.any(\n fitted_feature_names != X_feature_names\n ):\n message = (\n \"The feature names should match those that were passed during fit.\\n\"\n )\n fitted_feature_names_set = set(fitted_feature_names)\n X_feature_names_set = set(X_feature_names)\n\n unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)\n missing_names = sorted(fitted_feature_names_set - X_feature_names_set)\n\n def add_names(names):\n output = \"\"\n max_n_names = 5\n for i, name in enumerate(names):\n if i >= max_n_names:\n output += \"- ...\\n\"\n break\n output += f\"- {name}\\n\"\n return output\n\n if unexpected_names:\n message += \"Feature names unseen at fit time:\\n\"\n message += add_names(unexpected_names)\n\n if missing_names:\n message += \"Feature names seen at fit time, yet now missing:\\n\"\n message += add_names(missing_names)\n\n if not missing_names and not unexpected_names:\n message += (\n \"Feature names must be in the same order as they were in fit.\\n\"\n )\n\n raise ValueError(message)\n\n def _validate_data(\n self,\n X=\"no_validation\",\n y=\"no_validation\",\n reset=True,\n validate_separately=False,\n cast_to_ndarray=True,\n **check_params,\n ):\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features), default='no validation'\n The input samples.\n If `'no_validation'`, no validation is performed on `X`. This is\n useful for meta-estimator which can delegate input validation to\n their underlying estimator(s). In that case `y` must be passed and\n the only accepted `check_params` are `multi_output` and\n `y_numeric`.\n\n y : array-like of shape (n_samples,), default='no_validation'\n The targets.\n\n - If `None`, `check_array` is called on `X`. If the estimator's\n requires_y tag is True, then an error will be raised.\n - If `'no_validation'`, `check_array` is called on `X` and the\n estimator's requires_y tag is ignored. This is a default\n placeholder and is never meant to be explicitly set. 
In that case\n `X` must be passed.\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\n checked with either `check_array` or `check_X_y` depending on\n `validate_separately`.\n\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n\n `estimator=self` is automatically added to these dicts to generate\n more informative error message in case of invalid input data.\n\n cast_to_ndarray : bool, default=True\n Cast `X` and `y` to ndarray with checks in `check_params`. If\n `False`, `X` and `y` are unchanged and only `feature_names_in_` and\n `n_features_in_` are checked.\n\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\n is not False.\n\n `estimator=self` is automatically added to these params to generate\n more informative error message in case of invalid input data.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if both `X` and `y` are\n validated.\n \"\"\"\n self._check_feature_names(X, reset=reset)\n\n if y is None and self._get_tags()[\"requires_y\"]:\n raise ValueError(\n f\"This {self.__class__.__name__} estimator \"\n \"requires y to be passed, but the target y is None.\"\n )\n\n no_val_X = isinstance(X, str) and X == \"no_validation\"\n no_val_y = y is None or isinstance(y, str) and y == \"no_validation\"\n\n if no_val_X and no_val_y:\n raise ValueError(\"Validation should be done on X, y or both.\")\n\n default_check_params = {\"estimator\": self}\n check_params = {**default_check_params, **check_params}\n\n if not cast_to_ndarray:\n if not no_val_X and no_val_y:\n out = X\n elif no_val_X and not no_val_y:\n out = y\n else:\n out = X, y\n elif not no_val_X and no_val_y:\n out = check_array(X, input_name=\"X\", **check_params)\n elif no_val_X and not no_val_y:\n out = _check_y(y, **check_params)\n else:\n if validate_separately:\n # We need this because some estimators validate X and y\n # separately, and in general, separately calling check_array()\n # on X and y isn't equivalent to just calling check_X_y()\n # :(\n check_X_params, check_y_params = validate_separately\n if \"estimator\" not in check_X_params:\n check_X_params = {**default_check_params, **check_X_params}\n X = check_array(X, input_name=\"X\", **check_X_params)\n if \"estimator\" not in check_y_params:\n check_y_params = {**default_check_params, **check_y_params}\n y = check_array(y, input_name=\"y\", **check_y_params)\n else:\n X, y = check_X_y(X, y, **check_params)\n out = X, y\n\n if not no_val_X and check_params.get(\"ensure_2d\", True):\n self._check_n_features(X, reset=reset)\n\n return out\n\n def _validate_params(self):\n \"\"\"Validate types and values of constructor parameters\n\n The expected type and values must be defined in the `_parameter_constraints`\n class attribute, which is a dictionary `param_name: list of constraints`. 
See\n the docstring of `validate_parameter_constraints` for a description of the\n accepted constraints.\n \"\"\"\n validate_parameter_constraints(\n self._parameter_constraints,\n self.get_params(deep=False),\n caller_name=self.__class__.__name__,\n )\n\n @property\n def _repr_html_(self):\n \"\"\"HTML representation of estimator.\n\n This is redundant with the logic of `_repr_mimebundle_`. The latter\n should be favorted in the long term, `_repr_html_` is only\n implemented for consumers who do not interpret `_repr_mimbundle_`.\n \"\"\"\n if get_config()[\"display\"] != \"diagram\":\n raise AttributeError(\n \"_repr_html_ is only defined when the \"\n \"'display' configuration option is set to \"\n \"'diagram'\"\n )\n return self._repr_html_inner\n\n def _repr_html_inner(self):\n \"\"\"This function is returned by the @property `_repr_html_` to make\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\n on `get_config()[\"display\"]`.\n \"\"\"\n return estimator_html_repr(self)\n\n def _repr_mimebundle_(self, **kwargs):\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\n output = {\"text/plain\": repr(self)}\n if get_config()[\"display\"] == \"diagram\":\n output[\"text/html\"] = estimator_html_repr(self)\n return output" }, { "identifier": "ClassifierMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class ClassifierMixin:\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\n\n _estimator_type = \"classifier\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"\n Return the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True labels for `X`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of ``self.predict(X)`` w.r.t. `y`.\n \"\"\"\n from .metrics import accuracy_score\n\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n\n def _more_tags(self):\n return {\"requires_y\": True}" }, { "identifier": "_fit_context", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "def _fit_context(*, prefer_skip_nested_validation):\n \"\"\"Decorator to run the fit methods of estimators within context managers.\n\n Parameters\n ----------\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called during fit will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most estimators, except for those that receive\n non-validated objects as parameters, such as meta-estimators that are given\n estimator objects.\n\n Returns\n -------\n decorated_fit : method\n The decorated fit method.\n \"\"\"\n\n def decorator(fit_method):\n @functools.wraps(fit_method)\n def wrapper(estimator, *args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n\n # we don't want to validate again for each call to partial_fit\n partial_fit_and_fitted = (\n fit_method.__name__ == \"partial_fit\" and _is_fitted(estimator)\n )\n\n if not global_skip_validation and not partial_fit_and_fitted:\n estimator._validate_params()\n\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return fit_method(estimator, *args, **kwargs)\n\n return wrapper\n\n return decorator" }, { "identifier": "binarize", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/preprocessing/_data.py", "snippet": "@validate_params(\n {\n \"X\": [\"array-like\", \"sparse matrix\"],\n \"threshold\": [Interval(Real, None, None, closed=\"neither\")],\n \"copy\": [\"boolean\"],\n },\n prefer_skip_nested_validation=True,\n)\ndef binarize(X, *, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or scipy.sparse matrix.\n\n Read more in the :ref:`User Guide <preprocessing_binarization>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR or CSC format to avoid an\n un-necessary copy.\n\n threshold : float, default=0.0\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : bool, default=True\n Set to False to perform inplace binarization and avoid a copy\n (if the input is already a numpy array or a scipy.sparse CSR / CSC\n matrix and if axis is 1).\n\n Returns\n -------\n X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The transformed data.\n\n See Also\n --------\n Binarizer : Performs binarization using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n \"\"\"\n X = check_array(X, accept_sparse=[\"csr\", \"csc\"], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError(\"Cannot binarize a sparse matrix with threshold < 0\")\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return X" }, { "identifier": "LabelBinarizer", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/preprocessing/_label.py", "snippet": "class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n At learning time, this simply consists in learning one regressor\n or binary classifier per class. 
In doing so, one needs to convert\n multi-class labels to binary labels (belong or does not belong\n to the class). `LabelBinarizer` makes this process easy with the\n transform method.\n\n At prediction time, one assigns the class for which the corresponding\n model gave the greatest confidence. `LabelBinarizer` makes this easy\n with the :meth:`inverse_transform` method.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Parameters\n ----------\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False\n True if the returned array from transform is desired to be in sparse\n CSR format.\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,)\n Holds the label for each class.\n\n y_type_ : str\n Represents the type of the target data as evaluated by\n :func:`~sklearn.utils.multiclass.type_of_target`. Possible type are\n 'continuous', 'continuous-multioutput', 'binary', 'multiclass',\n 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.\n\n sparse_input_ : bool\n `True` if the input data to transform is given as a sparse matrix,\n `False` otherwise.\n\n See Also\n --------\n label_binarize : Function to perform the transform operation of\n LabelBinarizer with fixed classes.\n OneHotEncoder : Encode categorical features using a one-hot aka one-of-K\n scheme.\n\n Examples\n --------\n >>> from sklearn.preprocessing import LabelBinarizer\n >>> lb = LabelBinarizer()\n >>> lb.fit([1, 2, 6, 4, 2])\n LabelBinarizer()\n >>> lb.classes_\n array([1, 2, 4, 6])\n >>> lb.transform([1, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n Binary targets transform to a column vector\n\n >>> lb = LabelBinarizer()\n >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n Passing a 2D matrix for multilabel classification\n\n >>> import numpy as np\n >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))\n LabelBinarizer()\n >>> lb.classes_\n array([0, 1, 2])\n >>> lb.transform([0, 1, 2, 1])\n array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 1, 0]])\n \"\"\"\n\n _parameter_constraints: dict = {\n \"neg_label\": [Integral],\n \"pos_label\": [Integral],\n \"sparse_output\": [\"boolean\"],\n }\n\n def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):\n self.neg_label = neg_label\n self.pos_label = pos_label\n self.sparse_output = sparse_output\n\n @_fit_context(prefer_skip_nested_validation=True)\n def fit(self, y):\n \"\"\"Fit label binarizer.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Target values. 
The 2-d matrix should only contain 0 and 1,\n represents multilabel classification.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n if self.neg_label >= self.pos_label:\n raise ValueError(\n f\"neg_label={self.neg_label} must be strictly less than \"\n f\"pos_label={self.pos_label}.\"\n )\n\n if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):\n raise ValueError(\n \"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n f\"pos_label={self.pos_label} and neg_label={self.neg_label}\"\n )\n\n self.y_type_ = type_of_target(y, input_name=\"y\")\n\n if \"multioutput\" in self.y_type_:\n raise ValueError(\n \"Multioutput target data is not supported with label binarization\"\n )\n if _num_samples(y) == 0:\n raise ValueError(\"y has 0 samples: %r\" % y)\n\n self.sparse_input_ = sp.issparse(y)\n self.classes_ = unique_labels(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label binarizer/transform multi-class labels to binary labels.\n\n The output of transform is sometimes referred to as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {ndarray, sparse matrix} of shape (n_samples,) or \\\n (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n \"\"\"\n return self.fit(y).transform(y)\n\n def transform(self, y):\n \"\"\"Transform multi-class labels to binary labels.\n\n The output of transform is sometimes referred to by some authors as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {array, sparse matrix} of shape (n_samples,) or \\\n (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n \"\"\"\n check_is_fitted(self)\n\n y_is_multilabel = type_of_target(y).startswith(\"multilabel\")\n if y_is_multilabel and not self.y_type_.startswith(\"multilabel\"):\n raise ValueError(\"The object was not fitted with multilabel input.\")\n\n return label_binarize(\n y,\n classes=self.classes_,\n pos_label=self.pos_label,\n neg_label=self.neg_label,\n sparse_output=self.sparse_output,\n )\n\n def inverse_transform(self, Y, threshold=None):\n \"\"\"Transform binary labels back to multi-class labels.\n\n Parameters\n ----------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Target values. All sparse matrices are converted to CSR before\n inverse transformation.\n\n threshold : float, default=None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when ``Y`` contains the output of :term:`decision_function`\n (classifier).\n Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : {ndarray, sparse matrix} of shape (n_samples,)\n Target values. Sparse matrix will be of CSR format.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), :meth:`inverse_transform` chooses the class with the\n greatest value. 
Typically, this allows to use the output of a\n linear model's :term:`decision_function` method directly as the input\n of :meth:`inverse_transform`.\n \"\"\"\n check_is_fitted(self)\n\n if threshold is None:\n threshold = (self.pos_label + self.neg_label) / 2.0\n\n if self.y_type_ == \"multiclass\":\n y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n else:\n y_inv = _inverse_binarize_thresholding(\n Y, self.y_type_, self.classes_, threshold\n )\n\n if self.sparse_input_:\n y_inv = sp.csr_matrix(y_inv)\n elif sp.issparse(y_inv):\n y_inv = y_inv.toarray()\n\n return y_inv\n\n def _more_tags(self):\n return {\"X_types\": [\"1dlabels\"]}" }, { "identifier": "label_binarize", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/preprocessing/_label.py", "snippet": "@validate_params(\n {\n \"y\": [\"array-like\"],\n \"classes\": [\"array-like\"],\n \"neg_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"pos_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"sparse_output\": [\"boolean\"],\n },\n prefer_skip_nested_validation=True,\n)\ndef label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape (n_classes,)\n Uniquely holds the label for each class.\n\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False,\n Set to true if output binary array is desired in CSR sparse format.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. 
Sparse matrix will\n be of CSR format.\n\n See Also\n --------\n LabelBinarizer : Class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(\n y, input_name=\"y\", accept_sparse=\"csr\", ensure_2d=False, dtype=None\n )\n else:\n if _num_samples(y) == 0:\n raise ValueError(\"y has 0 samples: %r\" % y)\n if neg_label >= pos_label:\n raise ValueError(\n \"neg_label={0} must be strictly less than pos_label={1}.\".format(\n neg_label, pos_label\n )\n )\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\n \"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label)\n )\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if \"multioutput\" in y_type:\n raise ValueError(\n \"Multioutput target data is not supported with label binarization\"\n )\n if y_type == \"unknown\":\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if y_type == \"multilabel-indicator\":\n y_n_classes = y.shape[1] if hasattr(y, \"shape\") else len(y[0])\n if classes.size != y_n_classes:\n raise ValueError(\n \"classes {0} mismatch with the labels {1} found in the data\".format(\n classes, unique_labels(y)\n )\n )\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.isin(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))\n elif y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\n \"%s target data is not supported with label binarization\" % y_type\n )\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n 
return Y" }, { "identifier": "Hidden", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Hidden:\n \"\"\"Class encapsulating a constraint not meant to be exposed to the user.\n\n Parameters\n ----------\n constraint : str or _Constraint instance\n The constraint to be used internally.\n \"\"\"\n\n def __init__(self, constraint):\n self.constraint = constraint" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. \"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. 
Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "safe_sparse_dot", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/extmath.py", "snippet": "def safe_sparse_dot(a, b, *, dense_output=False):\n \"\"\"Dot product that handle the sparse matrix case correctly.\n\n Parameters\n ----------\n a : {ndarray, sparse matrix}\n b : {ndarray, sparse matrix}\n dense_output : bool, default=False\n When False, ``a`` and ``b`` both being sparse will yield sparse output.\n When True, output will always be a dense array.\n\n Returns\n -------\n dot_product : {ndarray, sparse matrix}\n Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.\n \"\"\"\n if a.ndim > 2 or b.ndim > 2:\n if sparse.issparse(a):\n # sparse is always 2D. Implies b is 3D+\n # [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n]\n b_ = np.rollaxis(b, -2)\n b_2d = b_.reshape((b.shape[-2], -1))\n ret = a @ b_2d\n ret = ret.reshape(a.shape[0], *b_.shape[1:])\n elif sparse.issparse(b):\n # sparse is always 2D. 
Implies a is 3D+\n # [k, ..., l, m] @ [i, j] -> [k, ..., l, j]\n a_2d = a.reshape(-1, a.shape[-1])\n ret = a_2d @ b\n ret = ret.reshape(*a.shape[:-1], b.shape[1])\n else:\n ret = np.dot(a, b)\n else:\n ret = a @ b\n\n if (\n sparse.issparse(a)\n and sparse.issparse(b)\n and dense_output\n and hasattr(ret, \"toarray\")\n ):\n return ret.toarray()\n return ret" }, { "identifier": "_check_partial_fit_first_call", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/multiclass.py", "snippet": "def _check_partial_fit_first_call(clf, classes=None):\n \"\"\"Private helper function for factorizing common classes param logic.\n\n Estimators that implement the ``partial_fit`` API need to be provided with\n the list of possible classes at the first call to partial_fit.\n\n Subsequent calls to partial_fit should check that ``classes`` is still\n consistent with a previous value of ``clf.classes_`` when provided.\n\n This function returns True if it detects that this was the first call to\n ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also\n set on ``clf``.\n\n \"\"\"\n if getattr(clf, \"classes_\", None) is None and classes is None:\n raise ValueError(\"classes must be passed on the first call to partial_fit.\")\n\n elif classes is not None:\n if getattr(clf, \"classes_\", None) is not None:\n if not np.array_equal(clf.classes_, unique_labels(classes)):\n raise ValueError(\n \"`classes=%r` is not the same as on last call \"\n \"to partial_fit, was: %r\" % (classes, clf.classes_)\n )\n\n else:\n # This is the first call to partial_fit\n clf.classes_ = unique_labels(classes)\n return True\n\n # classes is None and clf.classes_ has already previously been set:\n # nothing to do\n return False" }, { "identifier": "_check_sample_weight", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. 
It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n input_name=\"sample_weight\",\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight" }, { "identifier": "check_is_fitted", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):\n \"\"\"Perform is_fitted validation for estimator.\n\n Checks if the estimator is fitted by verifying the presence of\n fitted attributes (ending with a trailing underscore) and otherwise\n raises a NotFittedError with the given message.\n\n If an estimator does not set any attributes with a trailing underscore, it\n can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the\n estimator is fitted or not.\n\n Parameters\n ----------\n estimator : estimator instance\n Estimator instance for which the check is performed.\n\n attributes : str, list or tuple of str, default=None\n Attribute name(s) given as string or a list/tuple of strings\n Eg.: ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n\n If `None`, `estimator` is considered fitted if there exist an\n attribute that ends with a underscore and does not start with double\n underscore.\n\n msg : str, default=None\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this\n estimator.\"\n\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n\n all_or_any : callable, {all, any}, default=all\n Specify whether all or any of the given attributes must exist.\n\n Raises\n ------\n TypeError\n If the estimator is a class or not an estimator instance\n\n NotFittedError\n If the attributes are not found.\n \"\"\"\n if isclass(estimator):\n raise TypeError(\"{} is a class, not an instance.\".format(estimator))\n if msg is None:\n msg = (\n \"This %(name)s instance is not fitted yet. 
Call 'fit' with \"\n \"appropriate arguments before using this estimator.\"\n )\n\n if not hasattr(estimator, \"fit\"):\n raise TypeError(\"%s is not an estimator instance.\" % (estimator))\n\n if not _is_fitted(estimator, attributes, all_or_any):\n raise NotFittedError(msg % {\"name\": type(estimator).__name__})" }, { "identifier": "check_non_negative", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_non_negative(X, whom):\n \"\"\"\n Check if there is any negative value in an array.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Input data.\n\n whom : str\n Who passed X to this function.\n \"\"\"\n xp, _ = get_namespace(X)\n # avoid X.min() on sparse matrix since it also sorts the indices\n if sp.issparse(X):\n if X.format in [\"lil\", \"dok\"]:\n X = X.tocsr()\n if X.data.size == 0:\n X_min = 0\n else:\n X_min = X.data.min()\n else:\n X_min = xp.min(X)\n\n if X_min < 0:\n raise ValueError(\"Negative values in data passed to %s\" % whom)" } ]
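The context snippets above are scikit-learn's private parameter-validation and input-validation helpers. A minimal sketch of how the constraint classes behave, assuming a recent scikit-learn where the private module sklearn.utils._param_validation is importable as shown in those snippets; the "algorithm" entry and its option set are hypothetical and added only for illustration, while the "var_smoothing" constraint mirrors the next_line field of this record (shown further below).

from numbers import Real
from sklearn.utils._param_validation import Interval, StrOptions

# Constraint declarations in the style used by _parameter_constraints dicts.
constraints = {
    "var_smoothing": [Interval(Real, 0, None, closed="left")],  # the half-open interval [0, +inf)
    "algorithm": [StrOptions({"auto", "brute"})],                # hypothetical option set, illustration only
}

interval = constraints["var_smoothing"][0]
print(interval.is_satisfied_by(1e-9))   # True: a real number inside [0, +inf)
print(interval.is_satisfied_by(-1.0))   # False: negative values fall outside the interval
print(str(interval))                    # "a float in the range [0.0, inf)", per Interval.__str__ above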
import warnings import numpy as np from abc import ABCMeta, abstractmethod from numbers import Integral, Real from scipy.special import logsumexp from .base import BaseEstimator, ClassifierMixin, _fit_context from .preprocessing import LabelBinarizer, binarize, label_binarize from .utils._param_validation import Hidden, Interval, StrOptions from .utils.extmath import safe_sparse_dot from .utils.multiclass import _check_partial_fit_first_call from .utils.validation import _check_sample_weight, check_is_fitted, check_non_negative
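The import_statement above pulls in the validation helpers whose definitions appear in the context snippets. A small sketch of their behaviour, assuming those private sklearn.utils.validation helpers are importable as shown; the toy array is made up.

import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.utils.validation import _check_sample_weight, check_is_fitted

X = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])  # toy data with 3 samples

# sample_weight=None expands to a vector of ones; a scalar is broadcast to every sample.
print(_check_sample_weight(None, X))   # [1. 1. 1.]
print(_check_sample_weight(2.0, X))    # [2. 2. 2.]

# check_is_fitted raises NotFittedError before fit() has set any trailing-underscore attributes.
clf = GaussianNB()
try:
    check_is_fitted(clf)
except Exception as err:
    print(type(err).__name__)          # NotFittedError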
15891
X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(_BaseNB): """ Gaussian Naive Bayes (GaussianNB). Can perform online updates to model parameters via :meth:`partial_fit`. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. var_smoothing : float, default=1e-9 Portion of the largest variance of all features that is added to variances for calculation stability. .. versionadded:: 0.20 Attributes ---------- class_count_ : ndarray of shape (n_classes,) number of training samples observed in each class. class_prior_ : ndarray of shape (n_classes,) probability of each class. classes_ : ndarray of shape (n_classes,) class labels known to the classifier. epsilon_ : float absolute additive value to variances. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 var_ : ndarray of shape (n_classes, n_features) Variance of each feature per class. .. versionadded:: 1.0 theta_ : ndarray of shape (n_classes, n_features) mean of each feature per class. See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. MultinomialNB : Naive Bayes classifier for multinomial models. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "priors": ["array-like", None],
""" The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <[email protected]> # Minor fixes by Fabian Pedregosa # Amit Aides <[email protected]> # Yehuda Finkelstein <[email protected]> # Lars Buitinck # Jan Hendrik Metzen <[email protected]> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause __all__ = [ "BernoulliNB", "GaussianNB", "MultinomialNB", "ComplementNB", "CategoricalNB", ] class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape (n_samples, n_classes). Public methods predict, predict_proba, predict_log_proba, and predict_joint_log_proba pass the input through _check_X before handing it over to _joint_log_likelihood. The term "joint log likelihood" is used interchangibly with "joint log probability". """ @abstractmethod def _check_X(self, X): """To be overridden in subclasses with the actual checks. Only used in predict* methods. """ def predict_joint_log_proba(self, X): """Return joint log probability estimates for the test vector X. For each row x of X and class y, the joint log probability is given by ``log P(x, y) = log P(y) + log P(x|y),`` where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is the class-conditional probability. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples, n_classes) Returns the joint log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) return self._joint_log_likelihood(X) def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. 
""" return np.exp(self.predict_log_proba(X)) class GaussianNB(_BaseNB): """ Gaussian Naive Bayes (GaussianNB). Can perform online updates to model parameters via :meth:`partial_fit`. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. var_smoothing : float, default=1e-9 Portion of the largest variance of all features that is added to variances for calculation stability. .. versionadded:: 0.20 Attributes ---------- class_count_ : ndarray of shape (n_classes,) number of training samples observed in each class. class_prior_ : ndarray of shape (n_classes,) probability of each class. classes_ : ndarray of shape (n_classes,) class labels known to the classifier. epsilon_ : float absolute additive value to variances. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 var_ : ndarray of shape (n_classes, n_features) Variance of each feature per class. .. versionadded:: 1.0 theta_ : ndarray of shape (n_classes, n_features) mean of each feature per class. See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. MultinomialNB : Naive Bayes classifier for multinomial models. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
7
2023-10-07 13:19:48+00:00
24k
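The predict_log_proba implementation in the all_code field above normalizes the joint log-likelihood by log P(x) = logsumexp(jll, axis=1). A standalone numpy/scipy sketch of that normalization step; the jll values are made up.

import numpy as np
from scipy.special import logsumexp

# Hypothetical joint log-likelihoods log P(x, c) with shape (n_samples, n_classes).
jll = np.array([[-3.2, -1.1],
                [-0.7, -4.5]])

log_prob_x = logsumexp(jll, axis=1)                  # log P(x) for each sample
log_posterior = jll - np.atleast_2d(log_prob_x).T    # log P(c | x), what predict_log_proba returns
print(np.exp(log_posterior).sum(axis=1))             # [1. 1.] -- rows sum to one, matching predict_proba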
zbzhu99/madiff
diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.0,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(\n torch.maximum(\n s, self.thresholding_max_val * torch.ones_like(s).to(s.device)\n ),\n dims,\n )\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. 
The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == \"logSNR\":\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(\n lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1\n ).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == \"time_uniform\":\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == \"time_quadratic\":\n t_order = 2\n t = (\n torch.linspace(t_T ** (1.0 / t_order), t_0 ** (1.0 / t_order), N + 1)\n .pow(t_order)\n .to(device)\n )\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(\n skip_type\n )\n )\n\n def get_orders_and_timesteps_for_singlestep_solver(\n self, steps, order, skip_type, t_T, t_0, device\n ):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. 
The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [\n 3,\n ] * (\n K - 2\n ) + [2, 1]\n elif steps % 3 == 1:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [1]\n else:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [\n 2,\n ] * K\n else:\n K = steps // 2 + 1\n orders = [\n 2,\n ] * (\n K - 1\n ) + [1]\n elif order == 1:\n K = 1\n orders = [\n 1,\n ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == \"logSNR\":\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(\n torch.tensor(\n [\n 0,\n ]\n + orders\n ),\n 0,\n ).to(device)\n ]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(\n s\n ), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = sigma_t / sigma_s * x - alpha_t * phi_1 * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = torch.exp(log_alpha_t - log_alpha_s) * x - (sigma_t * phi_1) * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(\n self,\n x,\n s,\n t,\n r1=0.5,\n model_s=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. 
If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r1) * (alpha_t * (phi_1 / h + 1.0)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r1) * (sigma_t * (phi_1 / h - 1.0)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(\n self,\n x,\n s,\n t,\n r1=1.0 / 3.0,\n r2=2.0 / 3.0,\n model_s=None,\n model_s1=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 1.0 / 3.0\n if r2 is None:\n r2 = 2.0 / 3.0\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(s2),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_s2, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(s2),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_s2, alpha_t = (\n torch.exp(log_alpha_s1),\n torch.exp(log_alpha_s2),\n torch.exp(log_alpha_t),\n )\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.0\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.0\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (torch.exp(log_alpha_s1 - log_alpha_s)) * x - (\n sigma_s1 * phi_11\n ) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1, \"model_s2\": model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(\n self, x, 
model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.0)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.0)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(\n self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_2),\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1.0 / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(\n self,\n x,\n s,\n t,\n order,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n r1=None,\n r2=None,\n ):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, s, t, return_intermediate=return_intermediate\n )\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n )\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n r2=r2,\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(\n self, x, model_prev_list, t_prev_list, t, order, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. 
We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, t_prev_list[-1], t, model_s=model_prev_list[-1]\n )\n elif order == 2:\n return self.multistep_dpm_solver_second_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n elif order == 3:\n return self.multistep_dpm_solver_third_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(\n self,\n x,\n order,\n t_T,\n t_0,\n h_init=0.05,\n atol=0.0078,\n rtol=0.05,\n theta=0.9,\n t_err=1e-5,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. 
Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(\n x, s, t, return_intermediate=True\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, solver_type=solver_type, **kwargs\n )\n )\n elif order == 3:\n r1, r2 = 1.0 / 3.0, 2.0 / 3.0\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(\n x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs\n )\n )\n else:\n raise ValueError(\n \"For adaptive step size solver, order must be 2 or 3, got {}\".format(\n order\n )\n )\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(\n torch.ones_like(x).to(x) * atol,\n rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)),\n )\n norm_fn = lambda v: torch.sqrt(\n torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)\n )\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.0):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(\n theta * h * torch.float_power(E, -1.0 / order).float(),\n lambda_0 - lambda_s,\n )\n nfe += order\n print(\"adaptive solver nfe\", nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(\n self,\n x,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. 
For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(\n x,\n steps=steps,\n t_start=t_0,\n t_end=t_T,\n order=order,\n skip_type=skip_type,\n method=method,\n lower_order_final=lower_order_final,\n denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol,\n rtol=rtol,\n return_intermediate=return_intermediate,\n )\n\n def sample(\n self,\n x,\n condition_func,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. \"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == \"adaptive\":\n x = self.dpm_solver_adaptive(\n x,\n order=order,\n t_T=t_T,\n t_0=t_0,\n atol=atol,\n rtol=rtol,\n solver_type=solver_type,\n )\n elif method == \"multistep\":\n assert steps >= order\n timesteps = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device\n )\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in range(order, steps + 1):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step_order,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = 
model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n elif method in [\"singlestep\", \"singlestep_fixed\"]:\n if method == \"singlestep\":\n (\n timesteps_outer,\n orders,\n ) = self.get_orders_and_timesteps_for_singlestep_solver(\n steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T,\n t_0=t_0,\n device=device,\n )\n elif method == \"singlestep_fixed\":\n K = steps // order\n orders = [\n order,\n ] * K\n timesteps_outer = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device\n )\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(\n skip_type=skip_type,\n t_T=s.item(),\n t_0=t.item(),\n N=order,\n device=device,\n )\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = condition_func(x)\n x = self.singlestep_dpm_solver_update(\n x, s, t, order, solver_type=solver_type, r1=r1, r2=r2\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n x = condition_func(x)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n x = condition_func(x)\n if return_intermediate:\n intermediates.append(x)\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "diffuser/utils/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule=\"discrete\",\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.0,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. 
The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise\n schedule are the default settings in DDPM and improved-DDPM:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n cosine_s: A `float` number. The hyperparameter in the cosine schedule.\n cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' or 'cosine' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in [\"discrete\", \"linear\", \"cosine\"]:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'\".format(\n schedule\n )\n )\n\n self.schedule = schedule\n if schedule == \"discrete\":\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.total_N = len(log_alphas)\n self.T = 1.0\n self.t_array = (\n torch.linspace(0.0, 1.0, self.total_N + 1)[1:]\n .reshape((1, -1))\n .to(dtype=dtype)\n )\n self.log_alpha_array = log_alphas.reshape(\n (\n 1,\n -1,\n )\n ).to(dtype=dtype)\n else:\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n self.cosine_s = 0.008\n self.cosine_beta_max = 999.0\n self.cosine_t_max = (\n math.atan(self.cosine_beta_max * (1.0 + self.cosine_s) / math.pi)\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n self.cosine_log_alpha_0 = math.log(\n math.cos(self.cosine_s / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n self.schedule = schedule\n if schedule == \"cosine\":\n # For the cosine schedule, T = 1 will have numerical issues. 
So we manually set the ending time T.\n # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.\n self.T = 0.9946\n else:\n self.T = 1.0\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == \"discrete\":\n return interpolate_fn(\n t.reshape((-1, 1)),\n self.t_array.to(t.device),\n self.log_alpha_array.to(t.device),\n ).reshape((-1))\n elif self.schedule == \"linear\":\n return -0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n elif self.schedule == \"cosine\":\n log_alpha_fn = lambda s: torch.log(\n torch.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0\n return log_alpha_t\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == \"linear\":\n tmp = (\n 2.0\n * (self.beta_1 - self.beta_0)\n * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n )\n Delta = self.beta_0**2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == \"discrete\":\n log_alpha = -0.5 * torch.logaddexp(\n torch.zeros((1,)).to(lamb.device), -2.0 * lamb\n )\n t = interpolate_fn(\n log_alpha.reshape((-1, 1)),\n torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]),\n )\n return t.reshape((-1,))\n else:\n log_alpha = -0.5 * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n t_fn = (\n lambda log_alpha_t: torch.arccos(\n torch.exp(log_alpha_t + self.cosine_log_alpha_0)\n )\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n t = t_fn(log_alpha)\n return t" }, { "identifier": "model_wrapper", "path": "diffuser/utils/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.0,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. 
\"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. 
A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == \"discrete\":\n return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(x, t_input, **model_kwargs)\n else:\n output = model(x, t_input, cond, **model_kwargs)\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return (x - alpha_t * output) / sigma_t\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return alpha_t * output + sigma_t * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -sigma_t * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * sigma_t * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1.0 or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "Losses", "path": "diffuser/models/helpers.py", "snippet": "class SinusoidalPosEmb(nn.Module):\nclass Downsample1d(nn.Module):\nclass Upsample1d(nn.Module):\nclass Conv1dBlock(nn.Module):\nclass SelfAttention(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MlpSelfAttention(nn.Module):\nclass WeightedLoss(nn.Module):\nclass WeightedStateLoss(nn.Module):\nclass ValueLoss(nn.Module):\nclass WeightedL1(WeightedLoss):\nclass WeightedL2(WeightedLoss):\nclass WeightedStateL2(WeightedStateLoss):\nclass ValueL1(ValueLoss):\nclass ValueL2(ValueLoss):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, 
inp_channels, out_channels, kernel_size, mish=True, n_groups=8):\n def forward(self, x):\n def __init__(\n self,\n n_channels: int,\n qk_n_channels: int,\n v_n_channels: int,\n nheads: int = 4,\n residual: bool = False,\n use_state: bool = False,\n ):\n def forward(self, x, states: torch.Tensor = None):\n def __init__(self, num_hiddens, dropout: float = 0, max_len: int = 1000):\n def forward(self, X):\n def __init__(self, dim_in, dim_hidden=128):\n def forward(self, x):\ndef extract(a, t, x_shape):\ndef cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):\ndef apply_conditioning(x, conditions, action_dim):\n def __init__(self, weights, action_dim):\n def forward(self, pred, targ):\n def __init__(self, weights):\n def forward(self, pred, targ):\n def __init__(self, *args):\n def forward(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n X = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1) / torch.pow(\n 10000, torch.arange(0, num_hiddens, 2, dtype=torch.float32) / num_hiddens\n )\n X = X + self.P[:, : X.shape[1], :].to(X.device)" } ]
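Read together, the dpm_solver snippets above describe a three-step sampling pipeline: wrap the discrete noise schedule, wrap the trained network into a continuous-time noise predictor, then run the solver. Below is a minimal sketch of that flow under the guided-sampling recommendation from the quoted docstring (multistep DPM-Solver++ with order 2). `model`, `betas`, `cond`, `uncond`, and `sample_shape` are placeholders assumed to exist already, and the `condition_func` keyword is an assumption inferred from the hook that the quoted `sample` body applies at every update step.

import torch

from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper

# Assumed to exist already: a trained noise-prediction network `model`, its training
# `betas` tensor, optional conditioning tensors `cond` / `uncond`, and `sample_shape`.
noise_schedule = NoiseScheduleVP(schedule="discrete", betas=betas)

model_fn = model_wrapper(
    model,
    noise_schedule,
    model_type="noise",                  # the network predicts epsilon
    guidance_type="classifier-free",
    condition=cond,
    unconditional_condition=uncond,
    guidance_scale=1.2,
)

dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")

x_T = torch.randn(sample_shape)          # start from pure noise at t = T
x_0 = dpm_solver.sample(
    x_T,
    condition_func=lambda x: x,          # assumed hook; the quoted loop calls condition_func(x) at each update
    steps=20,
    order=2,
    skip_type="time_uniform",
    method="multistep",
)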
import functools import numpy as np import torch import torch.nn.functional as F import diffuser.utils as utils from torch import nn from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
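The `cosine_beta_schedule` and `extract` helpers imported here drive the buffer setup in the class below, but their bodies are not part of the quoted snippet. The following is a sketch of the standard forms they take in diffuser-style code bases (a cosine alpha-bar curve converted to betas, and a gather-and-reshape for per-timestep coefficients); the signatures match the declarations listed in the helpers snippet, while the actual implementations in `diffuser/models/helpers.py` may differ in details.

import numpy as np
import torch

def cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):
    # cosine cumulative-alpha curve (Nichol & Dhariwal), converted to betas and clipped
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1.0 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.tensor(np.clip(betas, 0.0, 0.999), dtype=dtype)

def extract(a, t, x_shape):
    # pick the coefficient a[t] for each batch element and reshape it to broadcast over x
    b = t.shape[0]
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))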
20,879
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights)
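The forward-process buffers registered above (`sqrt_alphas_cumprod`, `sqrt_one_minus_alphas_cumprod`) are typically consumed by a `q_sample` method that falls outside this excerpt. A sketch of the conventional form, written as a method of the class above and relying on the `extract` helper from the imports:

def q_sample(self, x_start, t, noise=None):
    # diffuse a clean trajectory x_0 to noise level t:
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    if noise is None:
        noise = torch.randn_like(x_start)
    return (
        extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
    )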
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights)
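Likewise, the reciprocal buffers and the posterior coefficients (`posterior_mean_coef1/2`, `posterior_log_variance_clipped`) are conventionally used to reconstruct x_0 from predicted noise and to form q(x_{t-1} | x_t, x_0). Those methods are not shown in the excerpt either; the sketch below shows their usual shape as methods of the class above, again assuming the `extract` helper:

def predict_start_from_noise(self, x_t, t, noise):
    # invert q_sample: recover x_0 from x_t and the predicted noise (used when predict_epsilon=True)
    return (
        extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
        - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
    )

def q_posterior(self, x_start, x_t, t):
    # Gaussian posterior q(x_{t-1} | x_t, x_0), built from the coefficients registered in __init__
    posterior_mean = (
        extract(self.posterior_mean_coef1, t, x_t.shape) * x_start
        + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
    )
    posterior_variance = extract(self.posterior_variance, t, x_t.shape)
    posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
    return posterior_mean, posterior_variance, posterior_log_variance_clipped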
self.loss_fn = Losses[loss_type](loss_weights, self.action_dim)
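The target line resolves the training criterion through a name-to-class registry: `Losses[loss_type]` returns one of the weighted-loss classes declared in the helpers snippet and instantiates it with the per-dimension weights and the action dimension. A plausible shape for that registry inside `diffuser/models/helpers.py`; the exact key names are an assumption:

# Hypothetical registry in diffuser/models/helpers.py; key names are assumed.
Losses = {
    "l1": WeightedL1,            # WeightedLoss subclasses take (weights, action_dim)
    "l2": WeightedL2,
    "state_l2": WeightedStateL2,
    "value_l1": ValueL1,
    "value_l2": ValueL2,
}

# With loss_type="l2", the target line amounts to:
#   self.loss_fn = WeightedL2(loss_weights, self.action_dim)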
3
2023-10-13 13:03:53+00:00
24k
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "attempt_loadv5", "path": "models/experimental.py", "snippet": "def attempt_loadv5(weights, device=None, inplace=True, fuse=True):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n from models.yolo import Detect, Model\n\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location='cpu') # load\n ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model\n\n # Model compatibility updates\n if not hasattr(ckpt, 'stride'):\n ckpt.stride = torch.tensor([32.])\n if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):\n ckpt.names = dict(enumerate(ckpt.names)) # convert to dict\n\n model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode\n\n # Module compatibility updates\n for m in model.modules():\n t = type(m)\n if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n m.inplace = inplace # torch 1.7.0 compatibility\n if t is Detect and not isinstance(m.anchor_grid, list):\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n\n # Return model\n if len(model) == 1:\n return model[-1]\n\n # Return detection ensemble\n print(f'Ensemble created with {weights}\\n')\n for k in 'names', 'nc', 'yaml':\n setattr(model, k, getattr(model[0], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'\n return model" }, { "identifier": "attempt_load_zxy", "path": "models/experimental.py", "snippet": "def attempt_load_zxy(weights, device, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n attempt_download(w)\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].to(device).float().fuse().eval()) # FP32 model\n\n # Compatibility updates\n for m in 
model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "Model", "path": "models/yolo.py", "snippet": "class Model(nn.Module):\n # def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n super(Model, self).__init__()\n self.traced = False\n if isinstance(cfg, dict):\n self.yaml = cfg # model dict\n else: # is *.yaml\n import yaml # for torch hub\n self.yaml_file = Path(cfg).name\n with open(cfg) as f:\n self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict\n\n # Define model\n ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels\n if nc and nc != self.yaml['nc']:\n logger.info(f\"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}\")\n self.yaml['nc'] = nc # override yaml value\n if anchors:\n logger.info(f'Overriding model.yaml anchors with anchors={anchors}')\n self.yaml['anchors'] = round(anchors) # override yaml value\n self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist\n # self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]).cuda() # model, savelist\n self.names = [str(i) for i in range(self.yaml['nc'])] # default names\n # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])\n\n # Build strides, anchors\n # m = self.model[-1] # Detect()\n m = self.model[-1] # Detect()\n if isinstance(m, Detect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IDetect):\n print('yolo.py-IDetect')\n # print('m', m) # m IDetect\n m.cuda()\n s = 256 # 2x min stride\n # m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]).cuda() # forward\n # print('m.device2', m.device)\n check_anchor_order(m)\n # print('m.device3', m.device)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IAuxDetect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward\n #print(m.stride)\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_aux_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IBin):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_bin() # only run once\n # 
print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IKeypoint):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_kpt() # only run once\n # print('Strides: %s' % m.stride.tolist())\n\n # Init weights, biases\n initialize_weights(self)\n self.info()\n logger.info('')\n\n def forward(self, x, augment=False, profile=False):\n # print('x', x.shape)\n if augment:\n img_size = x.shape[-2:] # height, width\n s = [1, 0.83, 0.67] # scales\n f = [None, 3, None] # flips (2-ud, 3-lr)\n y = [] # outputs\n for si, fi in zip(s, f):\n xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))\n yi = self.forward_once(xi)[0] # forward\n # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save\n yi[..., :4] /= si # de-scale\n if fi == 2:\n yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud\n elif fi == 3:\n yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr\n y.append(yi)\n # print('y', y.shape)\n return torch.cat(y, 1), None # augmented inference, train\n else:\n return self.forward_once(x, profile) # single-scale inference, train\n\n def forward_once(self, x, profile=False):\n # print('x1', x.shape)\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if not hasattr(self, 'traced'):\n self.traced=False\n\n if self.traced:\n if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint):\n break\n\n # print('profile', profile) # Flase\n if profile:\n c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin))\n o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS\n # print('o', o.shape)\n for _ in range(10):\n m(x.copy() if c else x)\n t = time_synchronized()\n for _ in range(10):\n m(x.copy() if c else x)\n dt.append((time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\n\n # print('x3', x.shape)\n # print('m.i', m.i) # =len(y)\n x = m(x) # run\\\n \n y.append(x if m.i in self.save else None) # save output\n # print('x4', x.shape)\n\n if profile:\n print('%.1fms total' % sum(dt))\n\n # print('x', len(x)) # 3\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, mi2, s in zip(m.m, m.m2, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) 
** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True)\n\n def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Bin() module\n bc = m.bin_count\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n old = b[:, (0,1,2,bc+3)].data\n obj_idx = 2*bc+4\n b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99))\n b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n b[:, (0,1,2,bc+3)].data = old\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for mi in m.m: # from\n b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))\n\n # def _print_weights(self):\n # for m in self.model.modules():\n # if type(m) is Bottleneck:\n # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers... ')\n for m in self.model.modules():\n if isinstance(m, RepConv):\n #print(f\" fuse_repvgg_block\")\n m.fuse_repvgg_block()\n elif isinstance(m, RepConv_OREPA):\n #print(f\" switch_to_deploy\")\n m.switch_to_deploy()\n elif type(m) is Conv and hasattr(m, 'bn'):\n m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv\n delattr(m, 'bn') # remove batchnorm\n m.forward = m.fuseforward # update forward\n elif isinstance(m, (IDetect, IAuxDetect)):\n m.fuse()\n m.forward = m.fuseforward\n self.info()\n return self\n\n def nms(self, mode=True): # add or remove NMS module\n present = type(self.model[-1]) is NMS # last layer is NMS\n if mode and not present:\n print('Adding NMS... ')\n m = NMS() # module\n m.f = -1 # from\n m.i = self.model[-1].i + 1 # index\n self.model.add_module(name='%s' % m.i, module=m) # add\n self.eval()\n elif not mode and present:\n print('Removing NMS... ')\n self.model = self.model[:-1] # remove\n return self\n\n def autoshape(self): # add autoShape module\n print('Adding autoShape... 
')\n m = autoShape(self) # wrap model\n copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes\n return m\n\n def info(self, verbose=False, img_size=640): # print model information\n model_info(self, verbose, img_size)" }, { "identifier": "check_anchors", "path": "utils/autoanchor.py", "snippet": "def check_anchors(dataset, model, thr=4.0, imgsz=640):\n # Check anchor fit to data, recompute if necessary\n prefix = colorstr('autoanchor: ')\n print(f'\\n{prefix}Analyzing anchors... ', end='')\n m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()\n shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale\n wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh\n\n def metric(k): # compute metric\n r = wh[:, None] / k[None]\n x = torch.min(r, 1. / r).min(2)[0] # ratio metric\n best = x.max(1)[0] # best_x\n aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold\n bpr = (best > 1. / thr).float().mean() # best possible recall\n return bpr, aat\n\n anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors\n bpr, aat = metric(anchors)\n print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')\n if bpr < 0.98: # threshold to recompute\n print('. Attempting to improve anchors, please wait...')\n na = m.anchor_grid.numel() // 2 # number of anchors\n try:\n anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n except Exception as e:\n print(f'{prefix}ERROR: {e}')\n new_bpr = metric(anchors)[0]\n if new_bpr > bpr: # replace anchors\n anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference\n check_anchor_order(m)\n m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss\n print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')\n else:\n print(f'{prefix}Original anchors better than new anchors. 
Proceeding with original anchors.')\n print('') # newline" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "labels_to_class_weights", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\ndef init_seeds(seed=0):\ndef get_latest_run(search_dir='.'):\ndef isdocker():\ndef emojis(str=''):\ndef check_online():\ndef check_git_status():\ndef check_requirements(requirements='requirements.txt', exclude=()):\ndef check_img_size(img_size, s=32):\ndef check_imshow():\ndef check_file(file):\ndef check_dataset(dict):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\ndef clip_coords(boxes, img_shape):\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\ndef bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):\ndef box_iou(box1, box2):\n def box_area(box):\ndef wh_iou(wh1, wh2):\ndef box_giou(box1, box2):\n def box_area(box):\ndef box_ciou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef box_diou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\ndef non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), kpt_label=False, nc=None, nkpt=None):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):\ndef apply_classifier(x, model, img, im0):\ndef 
increment_path(path, exist_ok=True, sep=''):" }, { "identifier": "attempt_download", "path": "utils/google_utils.py", "snippet": "def attempt_download(file, repo='WongKinYiu/yolov7'):\n # Attempt file download if does not exist\n file = Path(str(file).strip().replace(\"'\", '').lower())\n\n if not file.exists():\n try:\n response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api\n assets = [x['name'] for x in response['assets']] # release assets\n tag = response['tag_name'] # i.e. 'v1.0'\n except: # fallback plan\n assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', \n 'yolov7-e6e.pt', 'yolov7-w6.pt']\n tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]\n\n name = file.name\n if name in assets:\n msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'\n redundant = False # second download option\n try: # GitHub\n url = f'https://github.com/{repo}/releases/download/{tag}/{name}'\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert file.exists() and file.stat().st_size > 1E6 # check\n except Exception as e: # GCP\n print(f'Download error: {e}')\n assert redundant, 'No secondary mirror'\n url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'\n print(f'Downloading {url} to {file}...')\n os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights)\n finally:\n if not file.exists() or file.stat().st_size < 1E6: # check\n file.unlink(missing_ok=True) # remove partial downloads\n print(f'ERROR: Download failure: {msg}')\n print('')\n return" }, { "identifier": "ComputeLoss", "path": "utils/loss.py", "snippet": "class ComputeLoss:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLoss, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets): # predictions, targets, model\n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets\n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = indices[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = 
b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), tcls[i]] = self.cp\n #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype)\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n tcls, tbox, indices, anch = [], [], [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n tbox.append(torch.cat((gxy - gij, gwh), 1)) # box\n anch.append(anchors[a]) # anchors\n tcls.append(c) # class\n\n return tcls, tbox, indices, anch" }, { "identifier": "ComputeLossOTA", "path": "utils/loss.py", "snippet": "class ComputeLossOTA:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLossOTA, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors', 'stride':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets, imgs): # predictions, targets, model \n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)\n pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] \n \n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n grid = torch.stack([gi, gj], dim=1)\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n #pxy = ps[:, :2].sigmoid() * 3. 
- 1.\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]\n selected_tbox[:, :2] -= grid\n iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n selected_tcls = targets[i][:, 1].long()\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), selected_tcls] = self.cp\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets, imgs):\n \n #indices, anch = self.find_positive(p, targets)\n indices, anch = self.find_3_positive(p, targets)\n #indices, anch = self.find_4_positive(p, targets)\n #indices, anch = self.find_5_positive(p, targets)\n #indices, anch = self.find_9_positive(p, targets)\n device = torch.device(targets.device)\n matching_bs = [[] for pp in p]\n matching_as = [[] for pp in p]\n matching_gjs = [[] for pp in p]\n matching_gis = [[] for pp in p]\n matching_targets = [[] for pp in p]\n matching_anchs = [[] for pp in p]\n \n nl = len(p) \n \n for batch_idx in range(p[0].shape[0]):\n \n b_idx = targets[:, 0]==batch_idx\n this_target = targets[b_idx]\n if this_target.shape[0] == 0:\n continue\n \n txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]\n txyxy = xywh2xyxy(txywh)\n\n pxyxys = []\n p_cls = []\n p_obj = []\n from_which_layer = []\n all_b = []\n all_a = []\n all_gj = []\n all_gi = []\n all_anch = []\n \n for i, pi in enumerate(p):\n \n b, a, gj, gi = indices[i]\n idx = (b == batch_idx)\n b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] \n all_b.append(b)\n all_a.append(a)\n all_gj.append(gj)\n all_gi.append(gi)\n all_anch.append(anch[i][idx])\n from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device))\n \n fg_pred = pi[b, a, gj, gi] \n p_obj.append(fg_pred[:, 4:5])\n p_cls.append(fg_pred[:, 5:])\n \n grid = torch.stack([gi, gj], dim=1)\n pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8.\n #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. 
+ grid) * self.stride[i]\n pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8.\n pxywh = torch.cat([pxy, pwh], dim=-1)\n pxyxy = xywh2xyxy(pxywh)\n pxyxys.append(pxyxy)\n \n pxyxys = torch.cat(pxyxys, dim=0)\n if pxyxys.shape[0] == 0:\n continue\n p_obj = torch.cat(p_obj, dim=0)\n p_cls = torch.cat(p_cls, dim=0)\n from_which_layer = torch.cat(from_which_layer, dim=0)\n all_b = torch.cat(all_b, dim=0)\n all_a = torch.cat(all_a, dim=0)\n all_gj = torch.cat(all_gj, dim=0)\n all_gi = torch.cat(all_gi, dim=0)\n all_anch = torch.cat(all_anch, dim=0)\n \n pair_wise_iou = box_iou(txyxy, pxyxys)\n\n pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)\n\n top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)\n dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)\n\n gt_cls_per_image = (\n F.one_hot(this_target[:, 1].to(torch.int64), self.nc)\n .float()\n .unsqueeze(1)\n .repeat(1, pxyxys.shape[0], 1)\n )\n\n num_gt = this_target.shape[0]\n cls_preds_ = (\n p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n )\n\n y = cls_preds_.sqrt_()\n pair_wise_cls_loss = F.binary_cross_entropy_with_logits(\n torch.log(y/(1-y)) , gt_cls_per_image, reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n \n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_iou_loss\n )\n\n matching_matrix = torch.zeros_like(cost, device=device)\n\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1.0\n\n del top_k, dynamic_ks\n anchor_matching_gt = matching_matrix.sum(0)\n if (anchor_matching_gt > 1).sum() > 0:\n _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)\n matching_matrix[:, anchor_matching_gt > 1] *= 0.0\n matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0\n fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device)\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n \n from_which_layer = from_which_layer[fg_mask_inboxes]\n all_b = all_b[fg_mask_inboxes]\n all_a = all_a[fg_mask_inboxes]\n all_gj = all_gj[fg_mask_inboxes]\n all_gi = all_gi[fg_mask_inboxes]\n all_anch = all_anch[fg_mask_inboxes]\n \n this_target = this_target[matched_gt_inds]\n \n for i in range(nl):\n layer_idx = from_which_layer == i\n matching_bs[i].append(all_b[layer_idx])\n matching_as[i].append(all_a[layer_idx])\n matching_gjs[i].append(all_gj[layer_idx])\n matching_gis[i].append(all_gi[layer_idx])\n matching_targets[i].append(this_target[layer_idx])\n matching_anchs[i].append(all_anch[layer_idx])\n\n for i in range(nl):\n if matching_targets[i] != []:\n matching_bs[i] = torch.cat(matching_bs[i], dim=0)\n matching_as[i] = torch.cat(matching_as[i], dim=0)\n matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)\n matching_gis[i] = torch.cat(matching_gis[i], dim=0)\n matching_targets[i] = torch.cat(matching_targets[i], dim=0)\n matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)\n else:\n matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n\n return matching_bs, matching_as, matching_gjs, matching_gis, 
matching_targets, matching_anchs \n\n def find_3_positive(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n indices, anch = [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. < g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n anch.append(anchors[a]) # anchors\n\n return indices, anch" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if 
normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "plot_labels", "path": "utils/plots.py", "snippet": "def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):\n # plot dataset labels\n print('Plotting labels... ')\n c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes\n nc = int(c.max() + 1) # number of classes\n colors = color_list()\n x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])\n\n # seaborn correlogram\n sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)\n plt.close()\n\n # matplotlib labels\n matplotlib.use('svg') # faster\n ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n ax[0].set_ylabel('instances')\n if 0 < len(names) < 30:\n ax[0].set_xticks(range(len(names)))\n ax[0].set_xticklabels(names, rotation=90, fontsize=10)\n else:\n ax[0].set_xlabel('classes')\n sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)\n sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)\n\n # rectangles\n labels[:, 1:3] = 0.5 # center\n labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n for cls, *box in labels[:1000]:\n ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot\n ax[1].imshow(img)\n ax[1].axis('off')\n\n for a in [0, 1, 2, 3]:\n for s in ['top', 'right', 'left', 'bottom']:\n ax[a].spines[s].set_visible(False)\n\n plt.savefig(save_dir / 'labels.jpg', dpi=200)\n matplotlib.use('Agg')\n plt.close()\n\n # loggers\n for k, v in loggers.items() or {}:\n if k == 'wandb' and v:\n v.log({\"Labels\": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)" }, { "identifier": "plot_results", "path": "utils/plots.py", "snippet": "def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):\n # Plot training 'results*.txt'. 
from utils.plots import *; plot_results(save_dir='runs/train/exp')\n fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n ax = ax.ravel()\n s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',\n 'val Box', 'val Objectness', 'val Classification', '[email protected]', '[email protected]:0.95']\n if bucket:\n # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n files = ['results%g.txt' % x for x in id]\n c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)\n os.system(c)\n else:\n files = list(Path(save_dir).glob('results*.txt'))\n assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n for i in range(10):\n y = results[i, x]\n if i in [0, 1, 2, 5, 6, 7]:\n y[y == 0] = np.nan # don't show zero loss values\n # y /= y[0] # normalize\n label = labels[fi] if len(labels) else f.stem\n ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n # if i in [5, 6, 7]: # share train and val loss y axes\n # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n fig.savefig(Path(save_dir) / 'results.png', dpi=200)" }, { "identifier": "plot_evolution", "path": "utils/plots.py", "snippet": "def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()\n # Plot hyperparameter evolution results in evolve.txt\n with open(yaml_file) as f:\n hyp = yaml.load(f, Loader=yaml.SafeLoader)\n x = np.loadtxt('evolve.txt', ndmin=2)\n f = fitness(x)\n # weights = (f - f.min()) ** 2 # for weighted results\n plt.figure(figsize=(10, 12), tight_layout=True)\n matplotlib.rc('font', **{'size': 8})\n for i, (k, v) in enumerate(hyp.items()):\n y = x[:, i + 7]\n # mu = (y * weights).sum() / weights.sum() # best weighted result\n mu = y[f.argmax()] # best single result\n plt.subplot(6, 5, i + 1)\n plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')\n plt.plot(mu, f.max(), 'k+', markersize=15)\n plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters\n if i % 5 != 0:\n plt.yticks([])\n print('%15s: %.3g' % (k, mu))\n plt.savefig('evolve.png', dpi=200)\n print('\\nPlot saved as evolve.png')" }, { "identifier": "ModelEMA", "path": "utils/torch_utils.py", "snippet": "class ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - 
math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "intersect_dicts", "path": "utils/torch_utils.py", "snippet": "def intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}" }, { "identifier": "torch_distributed_zero_first", "path": "utils/torch_utils.py", "snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()" }, { "identifier": "is_parallel", "path": "utils/torch_utils.py", "snippet": "def is_parallel(model):\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)" }, { "identifier": "getMask", "path": "utils/distill_utils.py", "snippet": "def getMask(batch_size, gt_boxes, img_size, feat, anchors, max_num_box, device):\r\n # [b, K, 4]\r\n gt_boxes = make_gt_boxes(gt_boxes, max_num_box, batch_size, img_size)\r\n feat_stride = img_size[0] / feat.size(2)\r\n anchors = torch.from_numpy(generate_anchors(feat_stride, anchors))\r\n feat = feat.cpu()\r\n height, width = feat.size(2), feat.size(3)\r\n feat_height, feat_width = feat.size(2), feat.size(3)\r\n shift_x = np.arange(0, feat_width) * feat_stride\r\n shift_y = np.arange(0, feat_height) * feat_stride\r\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\r\n shifts = 
torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\r\n shift_x.ravel(), shift_y.ravel())).transpose())\r\n shifts = shifts.contiguous().type_as(feat).float()\r\n\r\n # num of anchors [3]\r\n A = anchors.size(0)\r\n K = shifts.size(0)\r\n\r\n anchors = anchors.type_as(gt_boxes)\r\n # all_anchors [K, A, 4]\r\n all_anchors = anchors.view(1, A, 4) + shifts.view(K, 1, 4)\r\n all_anchors = all_anchors.view(K * A, 4)\r\n # compute iou [all_anchors, gt_boxes]\r\n IOU_map = bbox_overlaps_batch(all_anchors, gt_boxes, img_size).view(batch_size, height, width, A, gt_boxes.shape[1])\r\n\r\n mask_batch = []\r\n for i in range(batch_size):\r\n max_iou, _ = torch.max(IOU_map[i].view(height * width * A, gt_boxes.shape[1]), dim=0)\r\n mask_per_im = torch.zeros([height, width], dtype=torch.int64).to(device)\r\n for k in range(gt_boxes.shape[1]):\r\n if torch.sum(gt_boxes[i][k]) == 0:\r\n break\r\n max_iou_per_gt = max_iou[k] * 0.5\r\n mask_per_gt = torch.sum(IOU_map[i][:, :, :, k] > max_iou_per_gt, dim=2)\r\n mask_per_im += mask_per_gt.to(device)\r\n mask_batch.append(mask_per_im)\r\n return mask_batch\r" }, { "identifier": "compute_mask_loss", "path": "utils/distill_utils.py", "snippet": "def compute_mask_loss(mask_batch, student_feature, teacher_feature, imitation_loss_weight):\r\n mask_list = []\r\n for mask in mask_batch:\r\n mask = (mask > 0).float().unsqueeze(0)\r\n mask_list.append(mask)\r\n mask_batch = torch.stack(mask_list, dim=0)\r\n norms = mask_batch.sum() * 2\r\n mask_batch_s = mask_batch.unsqueeze(4)\r\n no = student_feature.size(-1)\r\n bs, na, height, width, _ = mask_batch_s.shape\r\n mask_batch_no = mask_batch_s.expand((bs, na, height, width, no))\r\n sup_loss = (torch.pow(teacher_feature - student_feature, 2) * mask_batch_no).sum() / norms\r\n sup_loss = sup_loss * imitation_loss_weight\r\n return sup_loss\r" } ]
import argparse import logging import math import os import random import time import numpy as np import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import torch.utils.data import yaml import test # import test.py to get mAP after each epoch from copy import deepcopy from pathlib import Path from threading import Thread from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from models.experimental import attempt_load from models.experimental import attempt_loadv5 from models.experimental import attempt_load_zxy from models.yolo import Model from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss, ComputeLossOTA from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume from utils.distill_utils import getMask, compute_mask_loss
18,981
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank):
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
7
2023-10-08 13:05:58+00:00
24k
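The ModelEMA snippet in the record above maintains a shadow copy of the model whose weights are an exponential moving average of the live weights, with an effective decay that ramps up as decay * (1 - exp(-updates / 2000)). Below is a minimal, self-contained sketch of that update rule, assuming only PyTorch; TinyNet, SimpleEMA, and the toy training loop are illustrative names and are not part of the dataset record.

import math
from copy import deepcopy

import torch
import torch.nn as nn


class TinyNet(nn.Module):
    # Illustrative stand-in for the detection model; not part of the record above.
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)


class SimpleEMA:
    # Shadow copy whose weights trail the live model as an exponential moving average.
    def __init__(self, model, decay=0.9999):
        self.ema = deepcopy(model).eval()
        for p in self.ema.parameters():
            p.requires_grad_(False)
        self.updates = 0
        # Same ramp as in the snippet: the effective decay grows from 0 toward `decay`,
        # so the EMA follows the model closely during early training.
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))

    @torch.no_grad()
    def update(self, model):
        self.updates += 1
        d = self.decay(self.updates)
        msd = model.state_dict()
        for k, v in self.ema.state_dict().items():
            if v.dtype.is_floating_point:
                # v <- d * v + (1 - d) * live weight, applied in place
                v.mul_(d).add_(msd[k].detach(), alpha=1 - d)


model = TinyNet()
ema = SimpleEMA(model)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
for _ in range(3):  # toy training loop
    loss = model(torch.randn(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema.update(model)  # ema.ema now holds the smoothed weights

The in-place blend is the same update the snippet applies to every floating-point entry of the EMA state_dict; non-floating-point buffers are left untouched by the dtype check.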
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n units: int,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation='None',\n use_bias=True,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n\n self.units = units\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias: self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias: self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64) \n\n self.activation = activation\n self.use_bias = use_bias\n self.activation_fn = activations.get(activation)\n self.built = False\n\n def build(self, input_shape: list):\n if False: print(f\"input_shape={input_shape}\")\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.units,\n self.num_input_units,\n int(self.blade_indices_kernel.shape[0])\n ]\n if False: print(f\"shape_kernel={shape_kernel}\")\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.units, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-2], self.units, self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Perform a matrix-multiply, but using geometric product instead of\n # standard multiplication. 
To do this we do the geometric product\n # elementwise and then sum over the common axis.\n # [..., 1, I, X] * [..., O, I, X] -> [..., O, I, X] -> [..., O, X]\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(self.algebra.geom_prod(\n # inputs_expanded, w_geom), axis=-2)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod(inputs_expanded, w_geom).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n \"units\":\n self.units,\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n })\n return config" }, { "identifier": "GeometricSandwichProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductDense(GeometricProductDense):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric sandwich multiplication instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, units, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra, units,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n self.built = False\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Same as GeometricProductDense but using R*x*~R instead of just R*x\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(\n # self.algebra.geom_prod(\n # w_geom,\n # self.algebra.geom_prod(\n # inputs_expanded,\n # self.algebra.reversion(w_geom)\n # )\n # ),\n # axis=-2\n # )\n # if self.bias is not None:\n # b_geom = self.algebra.from_tensor(\n # self.bias, self.blade_indices_bias)\n # result += b_geom\n\n # return self.activation(result)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod( w_geom, self.algebra.geom_prod(inputs_expanded, self.algebra.reversion(w_geom))).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductElementwise", "path": "torch_ga/layers.py", "snippet": "class 
GeometricProductElementwise(GeometricAlgebraLayer):\n \"\"\"Performs the elementwise geometric product with a list of multivectors\n with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n # super().__init__(algebra=algebra, activity_regularizer=activity_regularizer, **kwargs)\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n \n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.num_input_units,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.num_input_units,self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.num_input_units,\n # self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(inputs, w_geom)\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def 
get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n # \"activation\":\n # self.activation,\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n })\n return config" }, { "identifier": "GeometricSandwichProductElementwise", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductElementwise(GeometricProductElementwise):\n \"\"\"Performs the elementwise geometric sandwich product with a list of\n multivectors with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor( self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication Rx~R for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(\n w_geom,\n self.algebra.geom_prod(\n inputs,\n self.algebra.reversion(w_geom)\n )\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductConv1D", "path": "torch_ga/layers.py", "snippet": "class GeometricProductConv1D(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Conv1D layer but using multivector-valued kernels\n instead of scalar ones and geometric product instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n filters: How many channels the output will have\n kernel_size: Size for the convolution kernel\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n blade_indices_kernel: Blade indices to use for the kernel parameter\n 
blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n filters: int,\n kernel_size: int,\n stride: int,\n padding: str,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n dilations: Union[None, int] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n algebra=algebra,\n # activity_regularizer=activity_regularizer,\n **kwargs\n )\n\n self.filters = filters\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilations = dilations\n\n self.blade_indices_kernel = torch.tensor( blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor( blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n # I: [..., S, C, B]\n self.num_input_filters = input_shape[-2]\n\n # K: [K, IC, OC, B]\n shape_kernel = [\n self.kernel_size,\n self.num_input_filters,\n self.filters,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def forward(self, inputs):\n if not self.built: \n self.build(inputs.shape)\n k_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n result = self.algebra.geom_conv1d(\n inputs, k_geom,\n stride=self.stride, padding=self.padding,\n dilations=self.dilations\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"filters\":\n self.filters,\n \"kernel_size\":\n self.kernel_size,\n \"stride\":\n self.stride,\n \"padding\":\n 
self.padding,\n \"dilations\":\n self.dilations,\n \"blade_indices_kernel\":\n self.blade_indices_kernel.numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.numpy(),\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n\n })\n\n return config" }, { "identifier": "GeometricAlgebraExp", "path": "torch_ga/layers.py", "snippet": "class GeometricAlgebraExp(GeometricAlgebraLayer):\n \"\"\"Calculates the exponential function of the input. Input must square to\n a scalar.\n\n Args:\n algebra: GeometricAlgebra instance to use\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n square_scalar_tolerance: Union[float, None] = 1e-4,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n self.square_scalar_tolerance = square_scalar_tolerance\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def build(self,inputs_shape): self.built = True\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n return self.algebra.exp(\n inputs, square_scalar_tolerance=self.square_scalar_tolerance\n )\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"square_scalar_tolerance\": self.square_scalar_tolerance\n })\n return config" }, { "identifier": "GeometricToTensor", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensor(GeometricAlgebraLayer):\n \"\"\"Layer for extracting given blades from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.blade_indices = torch.tensor(blade_indices).to(dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.blade_indices.shape[0]]\n def build(self,input_shape): self.built = True\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n # return torch.select(inputs, self.blade_indices, axis=-1)\n x = inputs[...,self.blade_indices]\n return x\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "GeometricToTensorWithKind", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensorWithKind(GeometricToTensor):\n \"\"\"Layer for extracting blades of a kind from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade indices of kind to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n blade_indices = algebra.get_kind_blade_indices(kind)\n 
super().__init__(algebra=algebra, blade_indices=blade_indices,\n **kwargs)" }, { "identifier": "TensorToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade indices to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices)\n def build(self,input_shape): self.built = True\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "TensorWithKindToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorWithKindToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade kind to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade kind indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.kind = kind\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.get_kind_blade_indices(self.kind).shape[0]]\n\n def build(self,input_shape): self.built = True\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n\n return self.algebra.from_tensor_with_kind(inputs, kind=self.kind)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"kind\": self.kind\n })\n return config" }, { "identifier": "BladeKind", "path": "torch_ga/blades.py", "snippet": "class BladeKind(Enum):\n \"\"\"Kind of blade depending on its degree.\"\"\"\n MV = \"mv\"\n EVEN = \"even\"\n ODD = \"odd\"\n SCALAR = \"scalar\"\n VECTOR = \"vector\"\n BIVECTOR = \"bivector\"\n TRIVECTOR = \"trivector\"\n PSEUDOSCALAR = \"pseudoscalar\"\n PSEUDOVECTOR = \"pseudovector\"\n PSEUDOBIVECTOR = \"pseudobivector\"\n PSEUDOTRIVECTOR = \"pseudotrivector\"" }, { "identifier": "GeometricAlgebra", "path": "torch_ga/torch_ga.py", "snippet": "class GeometricAlgebra:\n \"\"\"Class used for performing geometric algebra operations on `torch.Tensor` instances.\n Exposes methods for operating on `torch.Tensor` instances where their last\n axis is interpreted as blades of the algebra.\n Holds the metric and other quantities derived from it.\n \"\"\"\n\n def __init__(self, metric: List[float]):\n \"\"\"Creates a GeometricAlgebra object given a metric.\n The algebra will have as many basis vectors as there are\n elements in the metric.\n\n Args:\n metric: Metric as a list. 
Specifies what basis vectors square to\n \"\"\"\n self._metric = torch.tensor(metric, dtype=torch.float32)\n\n self._num_bases = len(metric)\n self._bases = list(map(str, range(self._num_bases)))\n\n self._blades, self._blade_degrees = blades_from_bases(self._bases)\n self._blade_degrees = torch.tensor(self._blade_degrees)\n self._num_blades = len(self._blades)\n self._max_degree = self._blade_degrees.max()\n\n # [Blades, Blades, Blades]\n _list = get_cayley_tensor(self.metric, self._bases, self._blades)\n # print(_list)\n if type(_list) in [list,tuple]:\n _list = np.array(_list)\n self._cayley, self._cayley_inner, self._cayley_outer = torch.tensor(\n _list,\n dtype=torch.float32\n )\n\n self._blade_mvs = torch.eye(self._num_blades)\n self._basis_mvs = self._blade_mvs[1:1+self._num_bases]\n\n # Find the dual by looking at the anti-diagonal in the Cayley tensor.\n self._dual_blade_indices = []\n self._dual_blade_signs = []\n\n for blade_index in range(self._num_blades):\n dual_index = self.num_blades - blade_index - 1\n anti_diag = self._cayley[blade_index, dual_index]\n # dual_sign = tf.gather(anti_diag, tf.where(\n # anti_diag != 0.0)[..., 0])[..., 0]\n dual_sign = anti_diag[torch.where(anti_diag != 0.0)]\n\n self._dual_blade_indices.append(dual_index)\n self._dual_blade_signs.append(dual_sign)\n\n self._dual_blade_indices = torch.tensor(\n self._dual_blade_indices, dtype=torch.int64)\n self._dual_blade_signs = torch.tensor(\n self._dual_blade_signs, dtype=torch.float32)\n\n def print(self, *args, **kwargs):\n \"\"\"Same as the default `print` function but formats `torch.Tensor`\n instances that have as many elements on their last axis\n as the algebra has blades using `mv_repr()`.\n \"\"\"\n def _is_mv(arg):\n return isinstance(arg, torch.Tensor) and len(arg.shape) > 0 and arg.shape[-1] == self.num_blades\n new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args]\n\n print(*new_args, **kwargs)\n\n @property\n def metric(self) -> torch.Tensor:\n \"\"\"Metric list which contains the number that each\n basis vector in the algebra squares to\n (ie. the diagonal of the metric tensor).\n \"\"\"\n return self._metric\n\n @property\n def cayley(self) -> torch.Tensor:\n \"\"\"`MxMxM` tensor where `M` is the number of basis\n blades in the algebra. Used for calculating the\n geometric product:\n\n `a_i, b_j, cayley_ijk -> c_k`\n \"\"\"\n return self._cayley\n\n @property\n def cayley_inner(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for inner product.\"\"\"\n return self._cayley_inner\n\n @property\n def cayley_outer(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for outer product.\"\"\"\n return self._cayley_outer\n\n @property\n def blades(self) -> List[str]:\n \"\"\"List of all blade names.\n\n Blades are all possible independent combinations of\n basis vectors. Basis vectors are named starting\n from `\"0\"` and counting up. 
The scalar blade is the\n empty string `\"\"`.\n\n Example\n - Bases: `[\"0\", \"1\", \"2\"]`\n - Blades: `[\"\", \"0\", \"1\", \"2\", \"01\", \"02\", \"12\", \"012\"]`\n \"\"\"\n return self._blades\n\n @property\n def blade_mvs(self) -> torch.Tensor:\n \"\"\"List of all blade tensors in the algebra.\"\"\"\n return self._blade_mvs\n\n @property\n def dual_blade_indices(self) -> torch.Tensor:\n \"\"\"Indices of the dual blades for each blade.\"\"\"\n return self._dual_blade_indices\n\n @property\n def dual_blade_signs(self) -> torch.Tensor:\n \"\"\"Signs of the dual blades for each blade.\"\"\"\n return self._dual_blade_signs\n\n @property\n def num_blades(self) -> int:\n \"\"\"Total number of blades in the algebra.\"\"\"\n return self._num_blades\n\n @property\n def blade_degrees(self) -> torch.Tensor:\n \"\"\"List of blade-degree for each blade in the algebra.\"\"\"\n return self._blade_degrees\n\n @property\n def max_degree(self) -> int:\n \"\"\"Highest blade degree in the algebra.\"\"\"\n return self._max_degree\n\n @property\n def basis_mvs(self) -> torch.Tensor:\n \"\"\"List of basis vectors as torch.Tensor.\"\"\"\n return self._basis_mvs\n\n def get_kind_blade_indices(self, kind: BladeKind, invert: bool = False) -> torch.Tensor:\n \"\"\"Find all indices of blades of a given kind in the algebra.\n\n Args:\n kind: kind of blade to give indices for\n invert: whether to return all blades not of the kind\n\n Returns:\n indices of blades of a given kind in the algebra\n \"\"\"\n return get_blade_of_kind_indices(self.blade_degrees, kind, self.max_degree, invert=invert)\n\n def get_blade_indices_of_degree(self, degree: int) -> torch.Tensor:\n \"\"\"Find all indices of blades of the given degree.\n\n Args:\n degree: degree to return blades for\n\n Returns:\n indices of blades with the given degree in the algebra\n \"\"\"\n # return tf.gather(tf.range(self.num_blades), tf.where(self.blade_degrees == degree)[..., 0])\n return torch.range(self.num_blades)[torch.where(self.blade_degrees == degree)[..., 0]]\n\n def is_pure(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> bool:\n \"\"\"Returns whether the given tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades.\n\n Args:\n tensor: tensor to check purity for\n blade_indices: blade indices to check purity for\n\n Returns:\n Whether the tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n if not type(blade_indices) in [torch.Tensor]:\n blade_indices = torch.tensor(blade_indices)\n \n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_indices = torch.tensor(\n # blade_indices, dtype=torch.int64)\n\n inverted_blade_indices = invert_blade_indices(\n self.num_blades, blade_indices)\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_blade_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_blade_indices]==0).sum(dim=-1)\n\n def is_pure_kind(self, tensor: torch.Tensor, kind: BladeKind) -> bool:\n \"\"\"Returns whether the given tensor is purely of a given kind\n and has no non-zero values for blades not of the kind.\n\n Args:\n tensor: tensor to check purity for\n kind: kind of blade to check purity for\n\n Returns:\n Whether the tensor is purely of a given kind\n and has no non-zero values for blades not of the kind\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = 
tensor.to(dtype=torch.float32)\n inverted_kind_indices = self.get_kind_blade_indices(kind, invert=True)\n # print(f\"tensor={tensor}\")\n # print(f\"kind={kind}\")\n # print(f\"inverted_kind_indices={inverted_kind_indices.T}\")\n # print(f\"inverted_kind_indices.shape={inverted_kind_indices.shape}\")\n # print(f\"tensor[inverted_kind_indices]={tensor[inverted_kind_indices].T}\")\n # print(f\"tensor[inverted_kind_indices].shape={tensor[inverted_kind_indices].shape}\")\n # print(f\"tensor[inverted_kind_indices]==0={tensor[inverted_kind_indices].T==0}\")\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_kind_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_kind_indices]==0).sum(dim=-1)\n\n # def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n # \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n # indices. The blade indices have to align with the last axis of the\n # tensor.\n\n # Args:\n # tensor: torch.Tensor to take as values for the geometric algebra tensor\n # blade_indices: Blade indices corresponding to the tensor. Can\n # be obtained from blade names eg. using get_kind_blade_indices()\n # or as indices from the blades list property.\n\n # Returns:\n # Geometric algebra torch.Tensor from tensor and blade indices\n # \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n # # print(f\"blade_indices={blade_indices}\")\n # # print(f\"tensor={tensor}\")\n \n # _shape = tensor.shape\n # is_scalar = False\n # if len(_shape)==1 :\n # _shape_final = [1]+ [self.num_blades] \n # is_scalar = True\n # else:\n # _shape_final = list(_shape[:-1]) + [self.num_blades] \n # b = torch.zeros(_shape_final)\n \n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # # raise \"whatever\"\n # b = b.reshape(_shape_final)\n\n # # _shape_tmp = list(v.shape) + [self.num_blades] \n # # print(f\"i,v,_shape_tmp,_shape_final={i},{v},{_shape_tmp},{_shape_final},i.shape={i.shape}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp)\n # # print(f\"b={b}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp).to_dense()\n # # b = b.reshape(_shape_final)\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n # # # Put last axis on first axis so scatter_nd becomes easier.\n # # # Later undo the transposition again.\n # # # t = tf.concat([[tensor.shape.ndims - 1],\n # # # tf.range(0, tensor.shape.ndims - 1)], axis=0)\n # # # t_inv = tf.concat([tf.range(1, tensor.shape.ndims), [0]], axis=0)\n\n # # # tensor = tf.transpose(tensor, t)\n\n # # # shape = tf.concat([\n # # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # # tf.shape(tensor, torch.int64)[1:]\n # # # ], axis=0)\n\n # # # tensor = tf.scatter_nd(\n # # # tf.expand_dims(blade_indices, axis=-1),\n # # # tensor,\n # # # shape\n # # # )\n\n # # # return tf.transpose(tensor, t_inv)\n # # # t = torch.concat([torch.tensor([len(tensor.shape) - 1]), torch.range(0, len(tensor.shape)- 1)], axis=0)\n # # # t_inv = torch.concat([torch.range(1, len(tensor.shape)), torch.tensor([0])], axis=0)\n # # t = 
[len(tensor.shape) - 1] + list(range(0, len(tensor.shape)- 1))\n # # t_inv = list(range(1, len(tensor.shape))) + [0]\n\n # # tensor = torch.permute(tensor, t)\n\n # # a= torch.tensor([self.num_blades], dtype=torch.int64)\n # # b = torch.tensor(tensor, dtype=torch.int64)[1:]\n # # print(\"a,b:\", a,b, tensor)\n\n\n # # shape = torch.concat([\n # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # torch.tensor(tensor, dtype=torch.int64)[1:]\n # # ], axis=0)\n\n\n # # # tensor = torch.scatter_nd(\n # # # blade_indices.unsqueeze(-1),\n # # # tensor,\n # # # shape\n # # # )\n # # a = torch.zeros(shape)\n # # a[blade_indices] = tensor\n # # tensor = a\n\n # # return torch.permute(tensor, t_inv) \n \n\n def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n indices. The blade indices have to align with the last axis of the\n tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n blade_indices: Blade indices corresponding to the tensor. Can\n be obtained from blade names eg. using get_kind_blade_indices()\n or as indices from the blades list property.\n\n Returns:\n Geometric algebra torch.Tensor from tensor and blade indices\n \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n tensor = tensor.to(dtype=torch.float32)\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"tensor={tensor}\")\n \n _shape = tensor.shape\n is_scalar = False\n if len(_shape)==1 :\n _shape_final = [1]+ [self.num_blades] \n is_scalar = True\n else:\n _shape_final = list(_shape[:-1]) + [self.num_blades] \n b = torch.zeros(_shape_final)\n\n if False:\n print(f\"blade_indices.shape={blade_indices.shape}\")\n print(f\"tensor.shape={tensor.shape}\")\n print(f\"_shape_final={_shape_final}\")\n \n\n\n # i = blade_indices.view([-1,1])\n # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n i = blade_indices.flatten()\n # v = tensor.flatten().unsqueeze(1)\n v = tensor.view([-1,_shape[-1]])\n b = b.view([-1,self.num_blades])\n if False:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n\n # b[:,i] = v\n try:\n b[:,i] = v\n except:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n raise\n b = b.reshape(_shape_final)\n\n if False:\n print(f\"b.shape={b.shape}\")\n\n if is_scalar:\n # b=b.unsqueeze(0)\n b=b.squeeze(0)\n return b\n\n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # b = b.reshape(_shape_final)\n\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n \n\n def from_tensor_with_kind(self, tensor: torch.Tensor, kind: BladeKind) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and a kind.\n The kind's blade indices have to align with the last axis of the\n 
tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n kind: Kind corresponding to the tensor\n\n Returns:\n Geometric algebra torch.Tensor from tensor and kind\n \"\"\"\n # Put last axis on first axis so scatter_nd becomes easier.\n # Later undo the transposition again.\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n kind_indices = self.get_kind_blade_indices(kind)\n if False:\n print(f\"tensor={tensor}\")\n print(f\"kind_indices={kind_indices}\")\n return self.from_tensor(tensor, kind_indices)\n\n def from_scalar(self, scalar: numbers.Number) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor with scalar elements.\n\n Args:\n scalar: Elements to be used as scalars\n\n Returns:\n Geometric algebra torch.Tensor from scalars\n \"\"\"\n # return self.from_tensor_with_kind(tf.expand_dims(scalar, axis=-1), BladeKind.SCALAR)\n # print(\"torch.tensor([scalar]).unsqueeze(-1).shape\",torch.tensor([scalar]).unsqueeze(-1).shape)\n return self.from_tensor_with_kind(torch.tensor([scalar]).unsqueeze(-1), BladeKind.SCALAR).squeeze(0)\n\n def e(self, *blades: List[str]) -> torch.Tensor:\n \"\"\"Returns a geometric algebra torch.Tensor with the given blades set\n to 1.\n\n Args:\n blades: list of blade names, can be unnormalized\n\n Returns:\n torch.Tensor with blades set to 1\n \"\"\"\n blade_signs, blade_indices = get_blade_indices_from_names(\n blades, self.blades)\n\n assert type(blade_indices) in [torch.Tensor], \"should be a tensor\"\n if False: blade_indices = torch.tensor(blade_indices)\n\n # # Don't allow duplicate indices\n # tf.Assert(\n # blade_indices.shape[0] == tf.unique(blade_indices)[0].shape[0],\n # [blades]\n # )\n\n # x = (\n # tf.expand_dims(blade_signs, axis=-1) *\n # tf.gather(self.blade_mvs, blade_indices)\n # )\n\n # # a, b -> b\n # return tf.reduce_sum(x, axis=-2)\n\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"torch.unique(blade_indices)={torch.unique(blade_indices)}\")\n # print(f\"torch.unique(blade_indices)[0]={torch.unique(blade_indices)[0]}\")\n # Don't allow duplicate indices\n # assert(\n # blade_indices.shape[0] == torch.unique(blade_indices).shape[0],\n # [blades]\n # )\n assert blade_indices.shape[0] == torch.unique(blade_indices).shape[0], \"indexes not unique\"\n\n x = blade_signs.unsqueeze(-1) * self.blade_mvs[blade_indices]\n\n # a, b -> b\n return x.sum(dim=-2) \n\n def __getattr__(self, name: str) -> torch.Tensor:\n \"\"\"Returns basis blade tensors if name was a basis.\"\"\"\n if name.startswith(\"e\") and (name[1:] == \"\" or int(name[1:]) >= 0):\n return self.e(name[1:])\n raise AttributeError\n\n def dual(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the dual of the geometric algebra tensor.\n\n Args:\n tensor: Geometric algebra tensor to return dual for\n\n Returns:\n Dual of the geometric algebra tensor\n \"\"\"\n tensor = torch.tensor(tensor, dtype=torch.float32)\n # return self.dual_blade_signs * tf.gather(tensor, self.dual_blade_indices, axis=-1)\n return self.dual_blade_signs * tensor[...,self.dual_blade_indices]\n\n def grade_automorphism(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor with odd grades negated.\n See https://en.wikipedia.org/wiki/Paravector#Grade_automorphism.\n\n Args:\n tensor: Geometric algebra tensor to return grade automorphism for\n\n Returns:\n Geometric algebra tensor with odd grades negated\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return 
mv_grade_automorphism(tensor, self.blade_degrees)\n\n def reversion(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the grade-reversed geometric algebra tensor.\n See https://en.wikipedia.org/wiki/Paravector#Reversion_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return grade-reversion for\n\n Returns:\n Grade-reversed geometric algebra tensor\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n\n return mv_reversion(tensor, self.blade_degrees)\n\n def conjugation(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Combines reversion and grade automorphism.\n See https://en.wikipedia.org/wiki/Paravector#Clifford_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return conjugate for\n\n Returns:\n Geometric algebra tensor after `reversion()` and `grade_automorphism()`\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return self.grade_automorphism(self.reversion(tensor))\n\n def simple_inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`. Only works for elements that\n square to scalars. Faster than the general inverse.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n rev_a = self.reversion(a)\n divisor = self.geom_prod(a, rev_a)\n # print(f\"divisor={divisor}\")\n # print(f\"self.is_pure_kind(divisor, BladeKind.SCALAR)={self.is_pure_kind(divisor, BladeKind.SCALAR)}\")\n if not self.is_pure_kind(divisor, BladeKind.SCALAR):\n raise Exception(\n \"Can't invert multi-vector (inversion divisor V ~V not scalar: %s).\" % divisor)\n\n # Divide by scalar part\n return rev_a / divisor[..., :1]\n\n def reg_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the regressive product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the regressive product\n b: Geometric algebra tensor on the right hand side of\n the regressive product\n\n Returns:\n regressive product of a and b\n \"\"\"\n a = torch.tensor(a, dtype=torch.float32)\n b = torch.tensor(b, dtype=torch.float32)\n\n return self.dual(self.ext_prod(self.dual(a), self.dual(b)))\n\n def ext_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the exterior product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the exterior product\n b: Geometric algebra tensor on the right hand side of\n the exterior product\n\n Returns:\n exterior product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_outer)\n\n def geom_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply(a, b, self._cayley)\n\n \n def element_wise_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the element-wise product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric 
algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply_element_wise(a, b, self._cayley)\n\n\n def inner_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inner product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the inner product\n b: Geometric algebra tensor on the right hand side of\n the inner product\n\n Returns:\n inner product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_inner)\n\n def geom_conv1d(self, a: torch.Tensor, k: torch.Tensor,\n stride: int, padding: str,\n dilations: Union[int, None] = None) -> torch.Tensor:\n \"\"\"Returns the 1D convolution of a sequence with a geometric algebra\n tensor kernel. The convolution is performed using the geometric\n product.\n\n Args:\n a: Input geometric algebra tensor of shape\n [..., Length, ChannelsIn, Blades]\n k: Geometric algebra tensor for the convolution kernel of shape\n [KernelSize, ChannelsIn, ChannelsOut, Blades]\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n Returns:\n Geometric algbra tensor of shape\n [..., OutputLength, ChannelsOut, Blades]\n representing `a` convolved with `k`\n \"\"\"\n a = a.to(dtype=torch.float32)\n k = k.to(dtype=torch.float32)\n\n # return mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n return f_mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n\n def mv_repr(self, a: torch.Tensor) -> str:\n \"\"\"Returns a string representation for the given\n geometric algebra tensor.\n\n Args:\n a: Geometric algebra tensor to return the representation for\n\n Returns:\n string representation for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if len(a.shape) == 1:\n return \"MultiVector[%s]\" % \" + \".join(\n \"%.2f*%s\" % (value, get_blade_repr(blade_name))\n for value, blade_name\n in zip(a, self.blades)\n if value != 0\n )\n else:\n return f\"MultiVector[batch_shape={a.shape[:-1]}]\"\n\n def approx_exp(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the exponential using a centered taylor series.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n order: order of the approximation\n\n Returns:\n Approximation of `exp(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n v = self.from_scalar(1.0)\n result = self.from_scalar(1.0)\n for i in range(1, order + 1):\n v = self.geom_prod(a, v)\n # i_factorial = tf.exp(tf.math.lgamma(i + 1.0))\n i_factorial = torch.exp(torch.lgamma(torch.tensor([i + 1.0])))\n result += v / i_factorial\n return result\n\n def exp(self, a: torch.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> torch.Tensor:\n \"\"\"Returns the exponential of the passed geometric algebra tensor.\n Only works for multivectors that square to scalars.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n\n Returns:\n `exp(a)`\n \"\"\"\n # See 
https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm\n # for an explanation of how to exponentiate multivectors.\n\n self_sq = self.geom_prod(a, a)\n\n if square_scalar_tolerance is not None:\n # tf.Assert(tf.reduce_all(\n # tf.abs(self_sq[..., 1:]) < square_scalar_tolerance\n # ), [self_sq])\n \n # assert torch.equal(torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance),[self_sq]), \"not sure what\"\n assert torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance), \"square_scalar_tolerance not met\"\n\n scalar_self_sq = self_sq[..., :1]\n\n # \"Complex\" square root (argument can be negative)\n s_sqrt = torch.sign(scalar_self_sq) * torch.sqrt(torch.abs(scalar_self_sq))\n\n # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||))\n # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||))\n # TODO: Does this work for values other than 1 too? eg. square to +0.5?\n # TODO: Find a solution that doesnt require calculating all possibilities\n # first.\n non_zero_result = torch.where(\n scalar_self_sq < 0,\n (self.from_tensor(torch.cos(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sin(s_sqrt)),\n (self.from_tensor(torch.cosh(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sinh(s_sqrt))\n )\n\n return torch.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)\n\n def approx_log(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the natural logarithm using a centered\n taylor series. Only converges for multivectors where `||mv - 1|| < 1`.\n\n Args:\n a: Geometric algebra tensor to return logarithm for\n order: order of the approximation\n\n Returns:\n Approximation of `log(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n result = self.from_scalar(0.0)\n\n a_minus_one = a - self.from_scalar(1.0)\n v = None\n\n for i in range(1, order + 1):\n v = a_minus_one if v is None else v * a_minus_one\n result += (((-1.0) ** i) / i) * v\n\n return -result\n\n def int_pow(self, a: torch.Tensor, n: int) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor to the power of an integer\n using repeated multiplication.\n\n Args:\n a: Geometric algebra tensor to raise\n n: integer power to raise the multivector to\n\n Returns:\n `a` to the power of `n`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if not isinstance(n, int):\n raise Exception(\"n must be an integer.\")\n if n < 0:\n raise Exception(\"Can't raise to negative powers.\")\n\n if n == 0:\n # TODO: more efficient (ones only in scalar)\n return torch.ones_like(a) * self.e(\"\")\n\n result = a\n for i in range(n - 1):\n result = self.geom_prod(result, a)\n return result\n\n def keep_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blade_indices as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to keep\n\n Returns:\n `a` with only `blade_indices` components as non-zeros\n \"\"\"\n a = a.to(dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_values = tf.gather(a, blade_indices, axis=-1)\n blade_values = a[...,blade_indices]\n if True: \n b = self.from_tensor(blade_values, blade_indices)\n else:\n blade_mask = torch.zeros(self.num_blades)\n blade_mask[blade_indices] = 1\n b = self.from_tensor(blade_values, blade_mask)\n # print(f\"blade_values, blade_indices, b={blade_values}, {blade_indices}, {b}\")\n # 
print(f\"blade_mask={blade_mask}\")\n return b\n\n # return self.from_tensor(blade_values, blade_indices)\n\n def keep_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blades as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `a` with only `blade_names` components as non-zeros\n \"\"\"\n if isinstance(blade_names, str):\n blade_names = [blade_names]\n\n _, blade_indices = get_blade_indices_from_names(blade_names, self.blades)\n\n if False:\n print(f\"self.blades={self.blades}\")\n print(f\"blade_names={blade_names}\")\n print(f\"blade_indices={blade_indices}\")\n\n return self.keep_blades(a, blade_indices)\n\n def select_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_indices on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to select\n\n Returns:\n `torch.Tensor` based on `a` with `blade_indices` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32) \n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # result = tf.gather(a, blade_indices, axis=-1)\n try:\n if len(a.shape)==1 or a.shape[-1]==a.size().numel():\n result = a.squeeze()[blade_indices]\n else:\n result = a[...,blade_indices]\n except:\n print(f\"a={a},blade_indices={blade_indices}\")\n print(f\"a.shape={a.shape},blade_indices.shape={blade_indices.shape},a.size().numel()={a.size().numel()}\")\n raise\n \n return result\n\n def select_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_names on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `torch.Tensor` based on `a` with `blade_names` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n is_single_blade = isinstance(blade_names, str)\n if is_single_blade:\n blade_names = [blade_names]\n\n blade_signs, blade_indices = get_blade_indices_from_names(\n blade_names, self.blades)\n\n result = blade_signs * self.select_blades(a, blade_indices)\n # if True:\n # print(f\"\")\n\n if is_single_blade:\n return result[..., 0]\n\n return result\n\n def inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`.\n\n Using Shirokov's inverse algorithm that works in arbitrary dimensions,\n see https://arxiv.org/abs/2005.04015 Theorem 4.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n a = a.to(dtype=torch.float32)\n if False:\n print(f\"a={a}\")\n\n n = 2 ** ((len(self.metric) + 1) // 2)\n\n # u = a.clone()\n u = a\n for k in range(1, n):\n # c = n / k * self.keep_blades_with_name(u, \"\")\n d = self.keep_blades_with_name(u, \"\")\n c = n / k * d\n u_minus_c = u - c\n if False:\n print(f\"a,d,c,u_minus_c, u = {a},{d},{c},{u_minus_c}, {u}\")\n u = self.geom_prod(a, u_minus_c)\n if False:\n print(f\"u={u}\")\n \n if False:\n print(f\"n={n}\")\n print(f\"a={a}\")\n print(f\"u={u}\")\n if not torch.all(self.is_pure_kind(u, BladeKind.SCALAR)):\n raise Exception(\n 
\"Can't invert multi-vector (det U not scalar: %s).\" % u)\n\n # adj / det\n return u_minus_c / u[..., :1]\n\n def __call__(self, a: torch.Tensor) -> MultiVector:\n \"\"\"Creates a `MultiVector` from a geometric algebra tensor.\n Mainly used as a wrapper for the algebra's functions for convenience.\n\n Args:\n a: Geometric algebra tensor to return `MultiVector` for\n\n Returns:\n `MultiVector` for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n return MultiVector(a, self)\n # return MultiVector(torch.tensor(a), self)" } ]
import unittest as ut
import h5py
import torch
import torch.nn as nn
import torch.nn.functional as F
from io import BytesIO

from torch_ga.layers import (
    GeometricProductDense, GeometricSandwichProductDense,
    GeometricProductElementwise, GeometricSandwichProductElementwise,
    GeometricProductConv1D,
    GeometricAlgebraExp,
    GeometricToTensor, GeometricToTensorWithKind,
    TensorToGeometric, TensorWithKindToGeometric,
)
from torch_ga.blades import BladeKind
from torch_ga import GeometricAlgebra
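To orient the reader, here is a minimal usage sketch of the GeometricAlgebra API documented in the context snippet above. It is illustrative only and not part of the dataset row: it assumes the torch_ga package from this repository is importable, the metric matches the tests below, and the exact printed representation format is left unspecified.

from torch_ga import GeometricAlgebra

ga = GeometricAlgebra([1, -1, -1, -1])   # same spacetime-algebra metric used by the tests below

a = ga.e("1") + ga.e("2")                # basis blades; equivalently ga.e1 + ga.e2 via __getattr__
b = ga.from_scalar(2.0)                  # scalar multivector

prod = ga.geom_prod(a, b)                # geometric product via the Cayley tensor
rev = ga.reversion(prod)                 # grade reversion
inv = ga.inverse(a)                      # Shirokov inverse, works in arbitrary dimensions

print(ga.mv_repr(prod))                  # human-readable form, e.g. "MultiVector[...]"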
18,116
sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0)
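The cropped_code above feeds tensors laid out as [batch, channels, blades] through GeometricProductDense. The following is a condensed sketch of that shape contract; the 8-channel output shape and the explicit build() call are assumptions read off the tests, not documented API.

import torch
from torch_ga import GeometricAlgebra
from torch_ga.layers import GeometricProductDense

sta = GeometricAlgebra([1, -1, -1, -1])     # 4 basis vectors -> 2**4 = 16 blades
x = torch.zeros([32, 6, 16])                # [batch, input channels, blades]
x[..., 1:5] = 1.0                           # pure vector input, mirroring the test

layer = GeometricProductDense(
    sta, 8,                                 # 8 output channels (assumed semantics)
    blade_indices_kernel=[1, 2, 3, 4],
    blade_indices_bias=[1, 2, 3, 4],
)
layer.build(x.shape)                        # some of the tests call build() explicitly before use
y = layer(x)
print(y.shape)                              # expected, under these assumptions: [32, 8, 16]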
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). 
assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0)
geom_prod_layer = GeometricSandwichProductDense(
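The next_line field above deliberately stops at the opening parenthesis; the dataset's gold continuation is not reproduced here. Purely as a hypothetical illustration of the pattern established by the GeometricProductDense tests, reusing the names (sta, vector_blade_indices, geom_tensor, result_indices) defined in the test body above; the argument list for GeometricSandwichProductDense is an assumption, not confirmed by this row.

# Hypothetical continuation -- NOT the dataset's gold snippet.
geom_prod_layer = GeometricSandwichProductDense(
    sta, 8,
    blade_indices_kernel=vector_blade_indices,   # assumed to mirror GeometricProductDense's signature
    blade_indices_bias=vector_blade_indices,
)
result = geom_prod_layer(geom_tensor)
# The test presumably then checks that `result` is pure in `result_indices`
# (vector + trivector blades), per the setup shown above.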
1
2023-10-07 13:34:07+00:00
24k
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
15383
# Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = 
datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message msg = msg[split_index:].strip() # Return the last message sent for reaction addition return sent_messages[-1] if sent_messages else None @bot.command(name='viewscheduledjobs') async def view_scheduled_jobs(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view scheduled jobs.") return # Get all scheduled jobs using the Scheduler's method scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager) # Send the scheduled jobs to the admin user for job in scheduled_jobs: await ctx.send(job) @bot.command(name='statusrequest') async def status_request(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to request status.") return # Find the member object using the Discord ID member_to_request = team_member_manager.find_member(discord_id) if member_to_request: for member in team_member_manager.team_members: scheduler.remove_job(member.discord_id) scheduler.unschedule_weekly_post() # Send the status request to the member await ctx.send(f"Status request sent to user with Discord ID {discord_id}.") for member in team_member_manager.team_members: scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager) scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager) await ctx.send(f"Status request received from user with Discord ID {discord_id}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='adduser') async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to add users.") return # Add the new member using team_member_manager team_member_manager.add_member(discord_id, name, time_zone, github_username) # Update the weekly post to include the new member new_member = team_member_manager.find_member(discord_id) if new_member: await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User {name} added successfully.") @bot.command(name='removeuser') async def remove_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to remove users.") return # Find the member object member_to_remove = team_member_manager.find_member(discord_id) if member_to_remove: # Remove the member from the database team_member_manager.remove_member(discord_id) # Update the weekly post to remove the member await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.remove_job(discord_id) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User with Discord ID {discord_id} removed successfully.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='listusers') async 
def list_users(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to list users.") return # List users using team_member_manager users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members] user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users]) await ctx.send(f"List of users:\n{user_list}") @bot.command(name='updatetimezone') async def update_timezone(ctx, discord_id: int, new_time_zone: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update timezones.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the timezone in the database team_member_manager.update_member_timezone(discord_id, new_time_zone) scheduler.remove_job(discord_id) scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='updatestreak') async def update_streak(ctx, discord_id: int, new_streak: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized 
to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
2
2023-10-12 02:01:46+00:00
24k
azuline/rose
rose/virtualfs.py
[ { "identifier": "SUPPORTED_AUDIO_EXTENSIONS", "path": "rose/audiotags.py", "snippet": "SUPPORTED_AUDIO_EXTENSIONS = [\n \".mp3\",\n \".m4a\",\n \".ogg\",\n \".opus\",\n \".flac\",\n]" }, { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: str | None\n genre: list[str]\n label: list[str]\n releasetype: str\n\n albumartists: ArtistMapping\n trackartists: ArtistMapping\n\n duration_sec: int\n\n path: Path\n\n @classmethod\n def from_file(cls, p: Path) -> AudioTags:\n \"\"\"Read the tags of an audio file on disk.\"\"\"\n if not any(p.suffix.lower() == ext for ext in SUPPORTED_AUDIO_EXTENSIONS):\n raise UnsupportedFiletypeError(f\"{p.suffix} not a supported filetype\")\n try:\n m = mutagen.File(p) # type: ignore\n except mutagen.MutagenError as e: # type: ignore\n raise UnsupportedFiletypeError(f\"Failed to open file: {e}\") from e\n if isinstance(m, mutagen.mp3.MP3):\n # ID3 returns trackno/discno tags as no/total. We have to parse.\n tracknumber = discnumber = tracktotal = disctotal = None\n if tracknos := _get_tag(m.tags, [\"TRCK\"]):\n try:\n tracknumber, tracktotalstr = tracknos.split(\"/\", 1)\n tracktotal = _parse_int(tracktotalstr)\n except ValueError:\n tracknumber = tracknos\n if discnos := _get_tag(m.tags, [\"TPOS\"]):\n try:\n discnumber, disctotalstr = discnos.split(\"/\", 1)\n disctotal = _parse_int(disctotalstr)\n except ValueError:\n discnumber = discnos\n\n def _get_paired_frame(x: str) -> str | None:\n if not m.tags:\n return None\n for tag in [\"TIPL\", \"IPLS\"]:\n try:\n frame = m.tags[tag]\n except KeyError:\n continue\n return r\" \\\\ \".join([p[1] for p in frame.people if p[0].lower() == x.lower()])\n return None\n\n return AudioTags(\n id=_get_tag(m.tags, [\"TXXX:ROSEID\"]),\n release_id=_get_tag(m.tags, [\"TXXX:ROSERELEASEID\"]),\n title=_get_tag(m.tags, [\"TIT2\"]),\n year=_parse_year(_get_tag(m.tags, [\"TDRC\", \"TYER\"])),\n tracknumber=tracknumber,\n tracktotal=tracktotal,\n discnumber=discnumber,\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"TALB\"]),\n genre=_split_tag(_get_tag(m.tags, [\"TCON\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"TPUB\"], split=True)),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"TXXX:RELEASETYPE\"], first=True)),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"TPE2\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"TPE1\"], split=True),\n remixer=_get_tag(m.tags, [\"TPE4\"], split=True),\n composer=_get_tag(m.tags, [\"TCOM\"], split=True),\n conductor=_get_tag(m.tags, [\"TPE3\"], split=True),\n producer=_get_paired_frame(\"producer\"),\n dj=_get_paired_frame(\"DJ-mix\"),\n ),\n duration_sec=round(m.info.length),\n path=p,\n )\n if isinstance(m, mutagen.mp4.MP4):\n tracknumber = discnumber = tracktotal = disctotal = None\n with contextlib.suppress(ValueError):\n tracknumber, tracktotalstr = _get_tuple_tag(m.tags, [\"trkn\"]) # type: ignore\n tracktotal = _parse_int(tracktotalstr)\n with contextlib.suppress(ValueError):\n discnumber, disctotalstr = _get_tuple_tag(m.tags, [\"disk\"]) # type: ignore\n disctotal = _parse_int(disctotalstr)\n\n return AudioTags(\n id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:ID\"]),\n release_id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:RELEASEID\"]),\n title=_get_tag(m.tags, [\"\\xa9nam\"]),\n 
year=_parse_year(_get_tag(m.tags, [\"\\xa9day\"])),\n tracknumber=str(tracknumber),\n tracktotal=tracktotal,\n discnumber=str(discnumber),\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"\\xa9alb\"]),\n genre=_split_tag(_get_tag(m.tags, [\"\\xa9gen\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"----:com.apple.iTunes:LABEL\"], split=True)),\n releasetype=_normalize_rtype(\n _get_tag(m.tags, [\"----:com.apple.iTunes:RELEASETYPE\"], first=True)\n ),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"aART\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"\\xa9ART\"], split=True),\n remixer=_get_tag(m.tags, [\"----:com.apple.iTunes:REMIXER\"], split=True),\n producer=_get_tag(m.tags, [\"----:com.apple.iTunes:PRODUCER\"], split=True),\n composer=_get_tag(m.tags, [\"\\xa9wrt\"], split=True),\n conductor=_get_tag(m.tags, [\"----:com.apple.iTunes:CONDUCTOR\"], split=True),\n dj=_get_tag(m.tags, [\"----:com.apple.iTunes:DJMIXER\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n return AudioTags(\n id=_get_tag(m.tags, [\"roseid\"]),\n release_id=_get_tag(m.tags, [\"rosereleaseid\"]),\n title=_get_tag(m.tags, [\"title\"]),\n year=_parse_year(_get_tag(m.tags, [\"date\", \"year\"])),\n tracknumber=_get_tag(m.tags, [\"tracknumber\"], first=True),\n tracktotal=_parse_int(_get_tag(m.tags, [\"tracktotal\"], first=True)),\n discnumber=_get_tag(m.tags, [\"discnumber\"], first=True),\n disctotal=_parse_int(_get_tag(m.tags, [\"disctotal\"], first=True)),\n album=_get_tag(m.tags, [\"album\"]),\n genre=_split_tag(_get_tag(m.tags, [\"genre\"], split=True)),\n label=_split_tag(\n _get_tag(m.tags, [\"organization\", \"label\", \"recordlabel\"], split=True)\n ),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"releasetype\"], first=True)),\n albumartists=parse_artist_string(\n main=_get_tag(m.tags, [\"albumartist\"], split=True)\n ),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"artist\"], split=True),\n remixer=_get_tag(m.tags, [\"remixer\"], split=True),\n producer=_get_tag(m.tags, [\"producer\"], split=True),\n composer=_get_tag(m.tags, [\"composer\"], split=True),\n conductor=_get_tag(m.tags, [\"conductor\"], split=True),\n dj=_get_tag(m.tags, [\"djmixer\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n raise UnsupportedFiletypeError(f\"{p} is not a supported audio file\")\n\n @no_type_check\n def flush(self, *, validate: bool = True) -> None:\n \"\"\"Flush the current tags to the file on disk.\"\"\"\n m = mutagen.File(self.path)\n if not validate and \"pytest\" not in sys.modules:\n raise Exception(\"Validate can only be turned off by tests.\")\n\n self.releasetype = (self.releasetype or \"unknown\").lower()\n if validate and self.releasetype not in SUPPORTED_RELEASE_TYPES:\n raise UnsupportedTagValueTypeError(\n f\"Release type {self.releasetype} is not a supported release type.\\n\"\n f\"Supported release types: {', '.join(SUPPORTED_RELEASE_TYPES)}\"\n )\n\n if isinstance(m, mutagen.mp3.MP3):\n if m.tags is None:\n m.tags = mutagen.id3.ID3()\n\n def _write_standard_tag(key: str, value: str | None) -> None:\n m.tags.delall(key)\n frame = getattr(mutagen.id3, key)(text=value)\n if value:\n m.tags.add(frame)\n\n def _write_tag_with_description(name: str, value: str | None) -> None:\n key, desc = name.split(\":\", 1)\n # Since the ID3 tags work with the shared prefix key 
before `:`, manually preserve\n # the other tags with the shared prefix key.\n keep_fields = [f for f in m.tags.getall(key) if getattr(f, \"desc\", None) != desc]\n m.tags.delall(key)\n if value:\n frame = getattr(mutagen.id3, key)(desc=desc, text=value)\n m.tags.add(frame)\n for f in keep_fields:\n m.tags.add(f)\n\n _write_tag_with_description(\"TXXX:ROSEID\", self.id)\n _write_tag_with_description(\"TXXX:ROSERELEASEID\", self.release_id)\n _write_standard_tag(\"TIT2\", self.title)\n _write_standard_tag(\"TDRC\", str(self.year).zfill(4))\n _write_standard_tag(\"TRCK\", self.tracknumber)\n _write_standard_tag(\"TPOS\", self.discnumber)\n _write_standard_tag(\"TALB\", self.album)\n _write_standard_tag(\"TCON\", \";\".join(self.genre))\n _write_standard_tag(\"TPUB\", \";\".join(self.label))\n _write_tag_with_description(\"TXXX:RELEASETYPE\", self.releasetype)\n _write_standard_tag(\"TPE2\", format_artist_string(self.albumartists))\n _write_standard_tag(\"TPE1\", format_artist_string(self.trackartists))\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n m.tags.delall(\"TPE4\")\n m.tags.delall(\"TCOM\")\n m.tags.delall(\"TPE3\")\n # Delete all paired text frames, since these represent additional artist roles. We don't\n # want to preserve them.\n m.tags.delall(\"TIPL\")\n m.tags.delall(\"IPLS\")\n m.save()\n return\n if isinstance(m, mutagen.mp4.MP4):\n if m.tags is None:\n m.tags = mutagen.mp4.MP4Tags()\n m.tags[\"----:net.sunsetglow.rose:ID\"] = (self.id or \"\").encode()\n m.tags[\"----:net.sunsetglow.rose:RELEASEID\"] = (self.release_id or \"\").encode()\n m.tags[\"\\xa9nam\"] = self.title or \"\"\n m.tags[\"\\xa9day\"] = str(self.year).zfill(4)\n m.tags[\"\\xa9alb\"] = self.album or \"\"\n m.tags[\"\\xa9gen\"] = \";\".join(self.genre)\n m.tags[\"----:com.apple.iTunes:LABEL\"] = \";\".join(self.label).encode()\n m.tags[\"----:com.apple.iTunes:RELEASETYPE\"] = self.releasetype.encode()\n m.tags[\"aART\"] = format_artist_string(self.albumartists)\n m.tags[\"\\xa9ART\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:REMIXER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:PRODUCER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"\\xa9wrt\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:CONDUCTOR\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:DJMIXER\"]\n\n # The track and disc numbers in MP4 are a bit annoying, because they must be a\n # single-element list of 2-tuple ints. We preserve the previous tracktotal/disctotal (as\n # Rose does not care about those values), and then attempt to write our own tracknumber\n # and discnumber.\n try:\n prev_tracktotal = m.tags[\"trkn\"][0][1]\n except (KeyError, IndexError):\n prev_tracktotal = 1\n try:\n prev_disctotal = m.tags[\"disk\"][0][1]\n except (KeyError, IndexError):\n prev_disctotal = 1\n try:\n m.tags[\"trkn\"] = [(int(self.tracknumber or \"0\"), prev_tracktotal)]\n m.tags[\"disk\"] = [(int(self.discnumber or \"0\"), prev_disctotal)]\n except ValueError as e:\n raise UnsupportedTagValueTypeError(\n \"Could not write m4a trackno/discno tags: must be integers. 
\"\n f\"Got: {self.tracknumber=} / {self.discnumber=}\"\n ) from e\n\n m.save()\n return\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n if m.tags is None:\n if isinstance(m, mutagen.flac.FLAC):\n m.tags = mutagen.flac.VCFLACDict()\n elif isinstance(m, mutagen.oggvorbis.OggVorbis):\n m.tags = mutagen.oggvorbis.OggVCommentDict()\n else:\n m.tags = mutagen.oggopus.OggOpusVComment()\n assert not isinstance(m.tags, mutagen.flac.MetadataBlock)\n m.tags[\"roseid\"] = self.id or \"\"\n m.tags[\"rosereleaseid\"] = self.release_id or \"\"\n m.tags[\"title\"] = self.title or \"\"\n m.tags[\"date\"] = str(self.year).zfill(4)\n m.tags[\"tracknumber\"] = self.tracknumber or \"\"\n m.tags[\"discnumber\"] = self.discnumber or \"\"\n m.tags[\"album\"] = self.album or \"\"\n m.tags[\"genre\"] = \";\".join(self.genre)\n m.tags[\"organization\"] = \";\".join(self.label)\n m.tags[\"releasetype\"] = self.releasetype\n m.tags[\"albumartist\"] = format_artist_string(self.albumartists)\n m.tags[\"artist\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"remixer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"producer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"composer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"conductor\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"djmixer\"]\n m.save()\n return\n\n raise RoseError(f\"Impossible: unknown mutagen type: {type(m)=} ({repr(m)=})\")" }, { "identifier": "STORED_DATA_FILE_REGEX", "path": "rose/cache.py", "snippet": "STORED_DATA_FILE_REGEX = re.compile(r\"\\.rose\\.([^.]+)\\.toml\")" }, { "identifier": "CachedRelease", "path": "rose/cache.py", "snippet": "class CachedRelease:\n id: str\n source_path: Path\n cover_image_path: Path | None\n added_at: str # ISO8601 timestamp\n datafile_mtime: str\n albumtitle: str\n releasetype: str\n year: int | None\n new: bool\n disctotal: int\n genres: list[str]\n labels: list[str]\n albumartists: ArtistMapping\n metahash: str\n\n @classmethod\n def from_view(cls, c: Config, row: dict[str, Any], aliases: bool = True) -> CachedRelease:\n return CachedRelease(\n id=row[\"id\"],\n source_path=Path(row[\"source_path\"]),\n cover_image_path=Path(row[\"cover_image_path\"]) if row[\"cover_image_path\"] else None,\n added_at=row[\"added_at\"],\n datafile_mtime=row[\"datafile_mtime\"],\n albumtitle=row[\"albumtitle\"],\n releasetype=row[\"releasetype\"],\n year=row[\"year\"],\n disctotal=row[\"disctotal\"],\n new=bool(row[\"new\"]),\n genres=_split(row[\"genres\"]) if row[\"genres\"] else [],\n labels=_split(row[\"labels\"]) if row[\"labels\"] else [],\n albumartists=_unpack_artists(\n c, row[\"albumartist_names\"], row[\"albumartist_roles\"], aliases=aliases\n ),\n metahash=row[\"metahash\"],\n )\n\n def dump(self) -> dict[str, Any]:\n return {\n \"id\": self.id,\n \"source_path\": str(self.source_path.resolve()),\n \"cover_image_path\": str(self.cover_image_path.resolve())\n if self.cover_image_path\n else None,\n \"added_at\": self.added_at,\n \"albumtitle\": self.albumtitle,\n \"releasetype\": self.releasetype,\n \"year\": self.year,\n \"new\": self.new,\n \"disctotal\": self.disctotal,\n \"genres\": self.genres,\n \"labels\": self.labels,\n \"albumartists\": self.albumartists.dump(),\n }" }, { "identifier": "CachedTrack", "path": "rose/cache.py", "snippet": "class CachedTrack:\n id: str\n source_path: Path\n 
source_mtime: str\n tracktitle: str\n tracknumber: str\n tracktotal: int\n discnumber: str\n disctotal: int\n duration_seconds: int\n trackartists: ArtistMapping\n metahash: str\n\n release: CachedRelease\n\n @classmethod\n def from_view(\n cls, c: Config, row: dict[str, Any], release: CachedRelease, aliases: bool = True\n ) -> CachedTrack:\n return CachedTrack(\n id=row[\"id\"],\n source_path=Path(row[\"source_path\"]),\n source_mtime=row[\"source_mtime\"],\n tracktitle=row[\"tracktitle\"],\n tracknumber=row[\"tracknumber\"],\n tracktotal=row[\"tracktotal\"],\n discnumber=row[\"discnumber\"],\n disctotal=row[\"disctotal\"],\n duration_seconds=row[\"duration_seconds\"],\n trackartists=_unpack_artists(\n c,\n row[\"trackartist_names\"],\n row[\"trackartist_roles\"],\n aliases=aliases,\n ),\n metahash=row[\"metahash\"],\n release=release,\n )\n\n def dump(self, with_release_info: bool = True) -> dict[str, Any]:\n r = {\n \"id\": self.id,\n \"source_path\": str(self.source_path.resolve()),\n \"tracktitle\": self.tracktitle,\n \"tracknumber\": self.tracknumber,\n \"tracktotal\": self.tracktotal,\n \"discnumber\": self.discnumber,\n \"disctotal\": self.disctotal,\n \"duration_seconds\": self.duration_seconds,\n \"trackartists\": self.trackartists.dump(),\n }\n if with_release_info:\n r.update(\n {\n \"release_id\": self.release.id,\n \"added_at\": self.release.added_at,\n \"albumtitle\": self.release.albumtitle,\n \"releasetype\": self.release.releasetype,\n \"year\": self.release.year,\n \"new\": self.release.new,\n \"genres\": self.release.genres,\n \"labels\": self.release.labels,\n \"albumartists\": self.release.albumartists.dump(),\n }\n )\n return r" }, { "identifier": "artist_exists", "path": "rose/cache.py", "snippet": "def artist_exists(c: Config, artist_sanitized: str) -> bool:\n args: list[str] = [artist_sanitized]\n for alias in c.sanitized_artist_aliases_map.get(artist_sanitized, []):\n args.append(alias)\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n SELECT EXISTS(\n SELECT * FROM releases_artists\n WHERE artist_sanitized IN ({','.join(['?']*len(args))})\n )\n \"\"\",\n args,\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "calculate_release_logtext", "path": "rose/cache.py", "snippet": "def calculate_release_logtext(\n title: str,\n year: int | None,\n artists: ArtistMapping,\n) -> str:\n logtext = f\"{artistsfmt(artists)} - \"\n if year:\n logtext += f\"{year}. 
\"\n logtext += title\n return logtext" }, { "identifier": "calculate_track_logtext", "path": "rose/cache.py", "snippet": "def calculate_track_logtext(title: str, artists: ArtistMapping, suffix: str) -> str:\n return f\"{artistsfmt(artists)} - {title or 'Unknown Title'}{suffix}\"" }, { "identifier": "genre_exists", "path": "rose/cache.py", "snippet": "def genre_exists(c: Config, genre_sanitized: str) -> bool:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT EXISTS(SELECT * FROM releases_genres WHERE genre_sanitized = ?)\",\n (genre_sanitized,),\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "get_collage", "path": "rose/cache.py", "snippet": "def get_collage(c: Config, collage_name: str) -> tuple[CachedCollage, list[CachedRelease]] | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT name, source_mtime FROM collages WHERE name = ?\",\n (collage_name,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n collage = CachedCollage(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n # Accumulated below when we query the releases.\n release_ids=[],\n )\n cursor = conn.execute(\n \"\"\"\n SELECT r.*\n FROM releases_view r\n JOIN collages_releases cr ON cr.release_id = r.id\n WHERE cr.collage_name = ? AND NOT cr.missing\n ORDER BY cr.position ASC\n \"\"\",\n (collage_name,),\n )\n releases: list[CachedRelease] = []\n for row in cursor:\n collage.release_ids.append(row[\"id\"])\n releases.append(CachedRelease.from_view(c, row))\n\n return (collage, releases)" }, { "identifier": "get_playlist", "path": "rose/cache.py", "snippet": "def get_playlist(c: Config, playlist_name: str) -> tuple[CachedPlaylist, list[CachedTrack]] | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT\n name\n , source_mtime\n , cover_path\n FROM playlists\n WHERE name = ?\n \"\"\",\n (playlist_name,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n playlist = CachedPlaylist(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n cover_path=Path(row[\"cover_path\"]) if row[\"cover_path\"] else None,\n # Accumulated below when we query the tracks.\n track_ids=[],\n )\n\n cursor = conn.execute(\n \"\"\"\n SELECT t.*\n FROM tracks_view t\n JOIN playlists_tracks pt ON pt.track_id = t.id\n WHERE pt.playlist_name = ? 
AND NOT pt.missing\n ORDER BY pt.position ASC\n \"\"\",\n (playlist_name,),\n )\n trackrows = cursor.fetchall()\n\n release_ids = [r[\"release_id\"] for r in trackrows]\n cursor = conn.execute(\n f\"\"\"\n SELECT *\n FROM releases_view\n WHERE id IN ({','.join(['?']*len(release_ids))})\n \"\"\",\n release_ids,\n )\n releases_map: dict[str, CachedRelease] = {}\n for row in cursor:\n releases_map[row[\"id\"]] = CachedRelease.from_view(c, row)\n\n tracks: list[CachedTrack] = []\n for row in trackrows:\n playlist.track_ids.append(row[\"id\"])\n tracks.append(CachedTrack.from_view(c, row, releases_map[row[\"release_id\"]]))\n\n return playlist, tracks" }, { "identifier": "get_release", "path": "rose/cache.py", "snippet": "def get_release(c: Config, release_id: str) -> CachedRelease | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT * FROM releases_view WHERE id = ?\",\n (release_id,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n return CachedRelease.from_view(c, row)" }, { "identifier": "get_track", "path": "rose/cache.py", "snippet": "def get_track(c: Config, uuid: str) -> CachedTrack | None:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT * FROM tracks_view WHERE id = ?\", (uuid,))\n trackrow = cursor.fetchone()\n if not trackrow:\n return None\n cursor = conn.execute(\"SELECT * FROM releases_view WHERE id = ?\", (trackrow[\"release_id\"],))\n release = CachedRelease.from_view(c, cursor.fetchone())\n return CachedTrack.from_view(c, trackrow, release)" }, { "identifier": "get_tracks_associated_with_release", "path": "rose/cache.py", "snippet": "def get_tracks_associated_with_release(\n c: Config,\n release: CachedRelease,\n) -> list[CachedTrack]:\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT *\n FROM tracks_view\n WHERE release_id = ?\n ORDER BY release_id, FORMAT('%4d.%4d', discnumber, tracknumber)\n \"\"\",\n (release.id,),\n )\n rval = []\n for row in cursor:\n rval.append(CachedTrack.from_view(c, row, release))\n return rval" }, { "identifier": "label_exists", "path": "rose/cache.py", "snippet": "def label_exists(c: Config, label_sanitized: str) -> bool:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT EXISTS(SELECT * FROM releases_labels WHERE label_sanitized = ?)\",\n (label_sanitized,),\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "list_artists", "path": "rose/cache.py", "snippet": "def list_artists(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT artist, artist_sanitized FROM releases_artists\")\n return [(row[\"artist\"], row[\"artist_sanitized\"]) for row in cursor]" }, { "identifier": "list_collages", "path": "rose/cache.py", "snippet": "def list_collages(c: Config) -> list[str]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT name FROM collages\")\n return [r[\"name\"] for r in cursor]" }, { "identifier": "list_genres", "path": "rose/cache.py", "snippet": "def list_genres(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT genre, genre_sanitized FROM releases_genres\")\n return [(row[\"genre\"], row[\"genre_sanitized\"]) for row in cursor]" }, { "identifier": "list_labels", "path": "rose/cache.py", "snippet": "def list_labels(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT label, label_sanitized FROM releases_labels\")\n return [(row[\"label\"], row[\"label_sanitized\"]) for row in cursor]" }, { 
"identifier": "list_playlists", "path": "rose/cache.py", "snippet": "def list_playlists(c: Config) -> list[str]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT name FROM playlists\")\n return [r[\"name\"] for r in cursor]" }, { "identifier": "list_releases_delete_this", "path": "rose/cache.py", "snippet": "def list_releases_delete_this(\n c: Config,\n sanitized_artist_filter: str | None = None,\n sanitized_genre_filter: str | None = None,\n sanitized_label_filter: str | None = None,\n new: bool | None = None,\n) -> list[CachedRelease]:\n with connect(c) as conn:\n query = \"SELECT * FROM releases_view WHERE 1=1\"\n args: list[str | bool] = []\n if sanitized_artist_filter:\n sanitized_artists: list[str] = [sanitized_artist_filter]\n for alias in c.sanitized_artist_aliases_map.get(sanitized_artist_filter, []):\n sanitized_artists.append(alias)\n query += f\"\"\"\n AND EXISTS (\n SELECT * FROM releases_artists\n WHERE release_id = id AND artist_sanitized IN ({','.join(['?']*len(sanitized_artists))})\n )\n \"\"\"\n args.extend(sanitized_artists)\n if sanitized_genre_filter:\n query += \"\"\"\n AND EXISTS (\n SELECT * FROM releases_genres\n WHERE release_id = id AND genre_sanitized = ?\n )\n \"\"\"\n args.append(sanitized_genre_filter)\n if sanitized_label_filter:\n query += \"\"\"\n AND EXISTS (\n SELECT * FROM releases_labels\n WHERE release_id = id AND label_sanitized = ?\n )\n \"\"\"\n args.append(sanitized_label_filter)\n if new is not None:\n query += \" AND new = ?\"\n args.append(new)\n query += \" ORDER BY source_path\"\n\n cursor = conn.execute(query, args)\n releases: list[CachedRelease] = []\n for row in cursor:\n releases.append(CachedRelease.from_view(c, row))\n return releases" }, { "identifier": "update_cache_for_releases", "path": "rose/cache.py", "snippet": "def update_cache_for_releases(\n c: Config,\n # Leave as None to update all releases.\n release_dirs: list[Path] | None = None,\n force: bool = False,\n # For testing.\n force_multiprocessing: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for any passed-in releases. If a directory lacks a\n .rose.{uuid}.toml datafile, create the datafile for the release and set it to the initial state.\n\n This is a hot path and is thus performance-optimized. The bottleneck is disk accesses, so we\n structure this function in order to minimize them. We solely read files that have changed since\n last run and batch writes together. We trade higher memory for reduced disk accesses.\n Concretely, we:\n\n 1. Execute one big SQL query at the start to fetch the relevant previous caches.\n 2. Skip reading a file's data if the mtime has not changed since the previous cache update.\n 3. 
Batch SQLite write operations to the end of this function, and only execute a SQLite upsert\n if the read data differs from the previous caches.\n\n We also shard the directories across multiple processes and execute them simultaneously.\n \"\"\"\n release_dirs = release_dirs or [\n Path(d.path) for d in os.scandir(c.music_source_dir) if d.is_dir()\n ]\n release_dirs = [\n d\n for d in release_dirs\n if d.name != \"!collages\"\n and d.name != \"!playlists\"\n and d.name not in c.ignore_release_directories\n ]\n if not release_dirs:\n logger.debug(\"No-Op: No whitelisted releases passed into update_cache_for_releases\")\n return\n logger.debug(f\"Refreshing the read cache for {len(release_dirs)} releases\")\n if len(release_dirs) < 10:\n logger.debug(f\"Refreshing cached data for {', '.join([r.name for r in release_dirs])}\")\n\n # If the number of releases changed is less than 50; do not bother with all that multiprocessing\n # gunk: instead, directly call the executor.\n #\n # This has an added benefit of not spawning processes from the virtual filesystem and watchdog\n # processes, as those processes always update the cache for one release at a time and are\n # multithreaded. Starting other processes from threads is bad!\n if not force_multiprocessing and len(release_dirs) < 50:\n logger.debug(\n f\"Running cache update executor in same process because {len(release_dirs)=} < 50\"\n )\n _update_cache_for_releases_executor(c, release_dirs, force)\n return\n\n # Batch size defaults to equal split across all processes. However, if the number of directories\n # is small, we shrink the # of processes to save on overhead.\n num_proc = c.max_proc\n if len(release_dirs) < c.max_proc * 50:\n num_proc = max(1, math.ceil(len(release_dirs) // 50))\n batch_size = len(release_dirs) // num_proc + 1\n\n manager = multiprocessing.Manager()\n # Have each process propagate the collages and playlists it wants to update back upwards. We\n # will dispatch the force updater only once in the main process, instead of many times in each\n # process.\n collages_to_force_update = manager.list()\n playlists_to_force_update = manager.list()\n\n errors: list[BaseException] = []\n\n logger.debug(\"Creating multiprocessing pool to parallelize cache executors.\")\n with multiprocessing.Pool(processes=c.max_proc) as pool:\n # At 0, no batch. At 1, 1 batch. At 49, 1 batch. At 50, 1 batch. 
At 51, 2 batches.\n for i in range(0, len(release_dirs), batch_size):\n logger.debug(\n f\"Spawning release cache update process for releases [{i}, {i+batch_size})\"\n )\n pool.apply_async(\n _update_cache_for_releases_executor,\n (\n c,\n release_dirs[i : i + batch_size],\n force,\n collages_to_force_update,\n playlists_to_force_update,\n ),\n error_callback=lambda e: errors.append(e),\n )\n pool.close()\n pool.join()\n\n if errors:\n raise ExceptionGroup(\"Exception occurred in cache update subprocesses\", errors) # type: ignore\n\n if collages_to_force_update:\n update_cache_for_collages(c, uniq(list(collages_to_force_update)), force=True)\n if playlists_to_force_update:\n update_cache_for_playlists(c, uniq(list(playlists_to_force_update)), force=True)" }, { "identifier": "add_release_to_collage", "path": "rose/collages.py", "snippet": "def add_release_to_collage(\n c: Config,\n collage_name: str,\n release_id: str,\n) -> None:\n release_logtext = get_release_logtext(c, release_id)\n if not release_logtext:\n raise ReleaseDoesNotExistError(f\"Release {release_id} does not exist\")\n\n path = collage_path(c, collage_name)\n if not path.exists():\n raise CollageDoesNotExistError(f\"Collage {collage_name} does not exist\")\n\n with lock(c, collage_lock_name(collage_name)):\n with path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n data[\"releases\"] = data.get(\"releases\", [])\n # Check to see if release is already in the collage. If so, no op. We don't support\n # duplicate collage entries.\n for r in data[\"releases\"]:\n if r[\"uuid\"] == release_id:\n logger.info(f\"No-Op: Release {release_logtext} already in collage {collage_name}\")\n return\n data[\"releases\"].append({\"uuid\": release_id, \"description_meta\": release_logtext})\n with path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n logger.info(f\"Added release {release_logtext} to collage {collage_name}\")\n update_cache_for_collages(c, [collage_name], force=True)" }, { "identifier": "create_collage", "path": "rose/collages.py", "snippet": "def create_collage(c: Config, name: str) -> None:\n (c.music_source_dir / \"!collages\").mkdir(parents=True, exist_ok=True)\n path = collage_path(c, name)\n with lock(c, collage_lock_name(name)):\n if path.exists():\n raise CollageAlreadyExistsError(f\"Collage {name} already exists\")\n path.touch()\n logger.info(f\"Created collage {name} in source directory\")\n update_cache_for_collages(c, [name], force=True)" }, { "identifier": "delete_collage", "path": "rose/collages.py", "snippet": "def delete_collage(c: Config, name: str) -> None:\n path = collage_path(c, name)\n with lock(c, collage_lock_name(name)):\n if not path.exists():\n raise CollageDoesNotExistError(f\"Collage {name} does not exist\")\n send2trash(path)\n logger.info(f\"Deleted collage {name} from source directory\")\n update_cache_evict_nonexistent_collages(c)" }, { "identifier": "remove_release_from_collage", "path": "rose/collages.py", "snippet": "def remove_release_from_collage(c: Config, collage_name: str, release_id: str) -> None:\n release_logtext = get_release_logtext(c, release_id)\n if not release_logtext:\n raise ReleaseDoesNotExistError(f\"Release {release_id} does not exist\")\n\n path = collage_path(c, collage_name)\n if not path.exists():\n raise CollageDoesNotExistError(f\"Collage {collage_name} does not exist\")\n with lock(c, collage_lock_name(collage_name)):\n with path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n old_releases = data.get(\"releases\", [])\n new_releases = [r for r in old_releases if 
r[\"uuid\"] != release_id]\n if old_releases == new_releases:\n logger.info(f\"No-Op: Release {release_logtext} not in collage {collage_name}\")\n return\n data[\"releases\"] = new_releases\n with path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n logger.info(f\"Removed release {release_logtext} from collage {collage_name}\")\n update_cache_for_collages(c, [collage_name], force=True)" }, { "identifier": "rename_collage", "path": "rose/collages.py", "snippet": "def rename_collage(c: Config, old_name: str, new_name: str) -> None:\n old_path = collage_path(c, old_name)\n new_path = collage_path(c, new_name)\n with lock(c, collage_lock_name(old_name)), lock(c, collage_lock_name(new_name)):\n if not old_path.exists():\n raise CollageDoesNotExistError(f\"Collage {old_name} does not exist\")\n if new_path.exists():\n raise CollageAlreadyExistsError(f\"Collage {new_name} already exists\")\n old_path.rename(new_path)\n # And also rename all files with the same stem (e.g. cover arts).\n for old_adjacent_file in (c.music_source_dir / \"!collages\").iterdir():\n if old_adjacent_file.stem != old_path.stem:\n continue\n new_adjacent_file = old_adjacent_file.with_name(\n new_path.stem + old_adjacent_file.suffix\n )\n if new_adjacent_file.exists():\n continue\n old_adjacent_file.rename(new_adjacent_file)\n logger.debug(\n \"Renaming collage-adjacent file {old_adjacent_file} to {new_adjacent_file}\"\n )\n logger.info(f\"Renamed collage {old_name} to {new_name}\")\n update_cache_for_collages(c, [new_name], force=True)\n update_cache_evict_nonexistent_collages(c)" }, { "identifier": "RoseError", "path": "rose/common.py", "snippet": "class RoseError(Exception):\n pass" }, { "identifier": "sanitize_dirname", "path": "rose/common.py", "snippet": "def sanitize_dirname(name: str, enforce_maxlen: bool) -> str:\n \"\"\"\n Replace illegal characters and truncate. We have 255 bytes in ext4, and we truncate to 240 in\n order to leave room for any collision numbers.\n\n enforce_maxlen is for host filesystems, which are sometimes subject to length constraints (e.g.\n ext4).\n \"\"\"\n name = ILLEGAL_FS_CHARS_REGEX.sub(\"_\", name)\n if enforce_maxlen:\n name = name.encode(\"utf-8\")[:240].decode(\"utf-8\", \"ignore\")\n return name" }, { "identifier": "sanitize_filename", "path": "rose/common.py", "snippet": "def sanitize_filename(name: str, enforce_maxlen: bool) -> str:\n \"\"\"Same as sanitize dirname, except we preserve file extension.\"\"\"\n name = ILLEGAL_FS_CHARS_REGEX.sub(\"_\", name)\n if enforce_maxlen:\n # Preserve the extension.\n stem, ext = os.path.splitext(name)\n # But ignore if the extension is longer than 6 characters; that means it's probably bullshit.\n if len(ext.encode()) > 6:\n stem = name\n ext = \"\"\n stem = stem.encode(\"utf-8\")[:240].decode(\"utf-8\", \"ignore\")\n name = stem + ext\n return name" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. 
Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of 
type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if 
not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list 
values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n )\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. 
State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" }, { "identifier": "add_track_to_playlist", "path": "rose/playlists.py", "snippet": "def add_track_to_playlist(\n c: Config,\n playlist_name: str,\n track_id: str,\n) -> None:\n track_logtext = get_track_logtext(c, track_id)\n if not track_logtext:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n path = playlist_path(c, playlist_name)\n if not path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {playlist_name} does not exist\")\n with lock(c, playlist_lock_name(playlist_name)):\n with path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n data[\"tracks\"] = data.get(\"tracks\", [])\n # Check to see if track is already in the playlist. If so, no op. 
We don't support\n # duplicate playlist entries.\n for r in data[\"tracks\"]:\n if r[\"uuid\"] == track_id:\n logger.info(f\"No-Op: Track {track_logtext} already in playlist {playlist_name}\")\n return\n data[\"tracks\"].append({\"uuid\": track_id, \"description_meta\": track_logtext})\n with path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n logger.info(f\"Added track {track_logtext} to playlist {playlist_name}\")\n update_cache_for_playlists(c, [playlist_name], force=True)" }, { "identifier": "create_playlist", "path": "rose/playlists.py", "snippet": "def create_playlist(c: Config, name: str) -> None:\n (c.music_source_dir / \"!playlists\").mkdir(parents=True, exist_ok=True)\n path = playlist_path(c, name)\n with lock(c, playlist_lock_name(name)):\n if path.exists():\n raise PlaylistAlreadyExistsError(f\"Playlist {name} already exists\")\n path.touch()\n logger.info(f\"Created playlist {name} in source directory\")\n update_cache_for_playlists(c, [name], force=True)" }, { "identifier": "delete_playlist", "path": "rose/playlists.py", "snippet": "def delete_playlist(c: Config, name: str) -> None:\n path = playlist_path(c, name)\n with lock(c, playlist_lock_name(name)):\n if not path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {name} does not exist\")\n send2trash(path)\n logger.info(f\"Deleted playlist {name} from source directory\")\n update_cache_evict_nonexistent_playlists(c)" }, { "identifier": "delete_playlist_cover_art", "path": "rose/playlists.py", "snippet": "def delete_playlist_cover_art(c: Config, playlist_name: str) -> None:\n \"\"\"This function removes all potential cover arts for the playlist.\"\"\"\n path = playlist_path(c, playlist_name)\n if not path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {playlist_name} does not exist\")\n found = False\n for f in (c.music_source_dir / \"!playlists\").iterdir():\n if f.stem == playlist_name and f.suffix[1:].lower() in c.valid_art_exts:\n logger.debug(f\"Deleting existing cover art {f.name} in playlists\")\n f.unlink()\n found = True\n if found:\n logger.info(f\"Deleted cover arts of playlist {playlist_name}\")\n else:\n logger.info(f\"No-Op: No cover arts found for playlist {playlist_name}\")\n update_cache_for_playlists(c, [playlist_name])" }, { "identifier": "remove_track_from_playlist", "path": "rose/playlists.py", "snippet": "def remove_track_from_playlist(\n c: Config,\n playlist_name: str,\n track_id: str,\n) -> None:\n track_logtext = get_track_logtext(c, track_id)\n if not track_logtext:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n path = playlist_path(c, playlist_name)\n if not path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {playlist_name} does not exist\")\n with lock(c, playlist_lock_name(playlist_name)):\n with path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n old_tracks = data.get(\"tracks\", [])\n new_tracks = [r for r in old_tracks if r[\"uuid\"] != track_id]\n if old_tracks == new_tracks:\n logger.info(f\"No-Op: Track {track_logtext} not in playlist {playlist_name}\")\n return\n data[\"tracks\"] = new_tracks\n with path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n logger.info(f\"Removed track {track_logtext} from playlist {playlist_name}\")\n update_cache_for_playlists(c, [playlist_name], force=True)" }, { "identifier": "rename_playlist", "path": "rose/playlists.py", "snippet": "def rename_playlist(c: Config, old_name: str, new_name: str) -> None:\n logger.info(f\"Renamed playlist {old_name} to {new_name}\")\n old_path = playlist_path(c, old_name)\n 
new_path = playlist_path(c, new_name)\n with lock(c, playlist_lock_name(old_name)), lock(c, playlist_lock_name(new_name)):\n if not old_path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {old_name} does not exist\")\n if new_path.exists():\n raise PlaylistAlreadyExistsError(f\"Playlist {new_name} already exists\")\n old_path.rename(new_path)\n # And also rename all files with the same stem (e.g. cover arts).\n for old_adjacent_file in (c.music_source_dir / \"!playlists\").iterdir():\n if old_adjacent_file.stem != old_path.stem:\n continue\n new_adjacent_file = old_adjacent_file.with_name(\n new_path.stem + old_adjacent_file.suffix\n )\n if new_adjacent_file.exists():\n continue\n old_adjacent_file.rename(new_adjacent_file)\n logger.debug(\n \"Renaming playlist-adjacent file {old_adjacent_file} to {new_adjacent_file}\"\n )\n update_cache_for_playlists(c, [new_name], force=True)\n update_cache_evict_nonexistent_playlists(c)" }, { "identifier": "set_playlist_cover_art", "path": "rose/playlists.py", "snippet": "def set_playlist_cover_art(c: Config, playlist_name: str, new_cover_art_path: Path) -> None:\n \"\"\"\n This function removes all potential cover arts for the playlist, and then copies the file\n file located at the passed in path to be the playlist's art file.\n \"\"\"\n suffix = new_cover_art_path.suffix.lower()\n if suffix[1:] not in c.valid_art_exts:\n raise InvalidCoverArtFileError(\n f\"File {new_cover_art_path.name}'s extension is not supported for cover images: \"\n \"To change this, please read the configuration documentation\"\n )\n\n path = playlist_path(c, playlist_name)\n if not path.exists():\n raise PlaylistDoesNotExistError(f\"Playlist {playlist_name} does not exist\")\n for f in (c.music_source_dir / \"!playlists\").iterdir():\n if f.stem == playlist_name and f.suffix[1:].lower() in c.valid_art_exts:\n logger.debug(f\"Deleting existing cover art {f.name} in playlists\")\n f.unlink()\n shutil.copyfile(new_cover_art_path, path.with_suffix(suffix))\n logger.info(f\"Set the cover of playlist {playlist_name} to {new_cover_art_path.name}\")\n update_cache_for_playlists(c, [playlist_name])" }, { "identifier": "delete_release", "path": "rose/releases.py", "snippet": "def delete_release(c: Config, release_id: str) -> None:\n release = get_release(c, release_id)\n if not release:\n raise ReleaseDoesNotExistError(f\"Release {release_id} does not exist\")\n with lock(c, release_lock_name(release_id)):\n send2trash(release.source_path)\n release_logtext = calculate_release_logtext(\n title=release.albumtitle,\n year=release.year,\n artists=release.albumartists,\n )\n logger.info(f\"Trashed release {release_logtext}\")\n update_cache_evict_nonexistent_releases(c)\n # Update all collages so that the release is removed from whichever collages it was in.\n update_cache_for_collages(c, None, force=True)" }, { "identifier": "set_release_cover_art", "path": "rose/releases.py", "snippet": "def set_release_cover_art(\n c: Config,\n release_id: str,\n new_cover_art_path: Path,\n) -> None:\n \"\"\"\n This function removes all potential cover arts in the release source directory and copies the\n file located at the passed in path to `cover.{ext}` in the release source directory.\n \"\"\"\n suffix = new_cover_art_path.suffix.lower()\n if suffix[1:] not in c.valid_art_exts:\n raise InvalidCoverArtFileError(\n f\"File {new_cover_art_path.name}'s extension is not supported for cover images: \"\n \"To change this, please read the configuration documentation\"\n )\n\n release = 
get_release(c, release_id)\n if not release:\n raise ReleaseDoesNotExistError(f\"Release {release_id} does not exist\")\n\n release_logtext = calculate_release_logtext(\n title=release.albumtitle,\n year=release.year,\n artists=release.albumartists,\n )\n\n for f in release.source_path.iterdir():\n if f.name.lower() in c.valid_cover_arts:\n logger.debug(f\"Deleting existing cover art {f.name} in {release_logtext}\")\n send2trash(f)\n shutil.copyfile(new_cover_art_path, release.source_path / f\"cover{new_cover_art_path.suffix}\")\n logger.info(f\"Set the cover of release {release_logtext} to {new_cover_art_path.name}\")\n update_cache_for_releases(c, [release.source_path])" }, { "identifier": "PathTemplate", "path": "rose/templates.py", "snippet": "class PathTemplate:\n \"\"\"\n A wrapper for a template that stores the template as a string and compiles on-demand as a\n derived propery. This grants us serialization of the config.\n \"\"\"\n\n text: str\n\n @cached_property\n def compiled(self) -> jinja2.Template:\n return ENVIRONMENT.from_string(self.text)\n\n def __hash__(self) -> int:\n return hash(self.text)\n\n def __getstate__(self) -> dict[str, Any]:\n # We cannot pickle a compiled path template, so remove it from the state before we pickle\n # it. We can cheaply recompute it in the subprocess anyways.\n state = self.__dict__.copy()\n if \"compiled\" in state:\n del state[\"compiled\"]\n return state" }, { "identifier": "eval_release_template", "path": "rose/templates.py", "snippet": "def eval_release_template(\n template: PathTemplate,\n release: CachedRelease,\n position: str | None = None,\n) -> str:\n return _collapse_spacing(template.compiled.render(**_calc_release_variables(release, position)))" }, { "identifier": "eval_track_template", "path": "rose/templates.py", "snippet": "def eval_track_template(\n template: PathTemplate,\n track: CachedTrack,\n position: str | None = None,\n) -> str:\n return (\n _collapse_spacing(template.compiled.render(**_calc_track_variables(track, position)))\n + track.source_path.suffix\n )" } ]
import collections import contextlib import errno import logging import os import random import re import stat import subprocess import tempfile import time import llfuse from collections.abc import Iterator from dataclasses import dataclass from pathlib import Path from typing import Any, Generic, Literal, TypeVar from rose.audiotags import SUPPORTED_AUDIO_EXTENSIONS, AudioTags from rose.cache import ( STORED_DATA_FILE_REGEX, CachedRelease, CachedTrack, artist_exists, calculate_release_logtext, calculate_track_logtext, genre_exists, get_collage, get_playlist, get_release, get_track, get_tracks_associated_with_release, label_exists, list_artists, list_collages, list_genres, list_labels, list_playlists, list_releases_delete_this, update_cache_for_releases, ) from rose.collages import ( add_release_to_collage, create_collage, delete_collage, remove_release_from_collage, rename_collage, ) from rose.common import RoseError, sanitize_dirname, sanitize_filename from rose.config import Config from rose.playlists import ( add_track_to_playlist, create_playlist, delete_playlist, delete_playlist_cover_art, remove_track_from_playlist, rename_playlist, set_playlist_cover_art, ) from rose.releases import ( delete_release, set_release_cover_art, ) from rose.templates import PathTemplate, eval_release_template, eval_track_template
19,926
return VirtualPath(view="Genres") if len(parts) == 2: return VirtualPath(view="Genres", genre=parts[1]) if len(parts) == 3: return VirtualPath(view="Genres", genre=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath(view="Genres", genre=parts[1], release=parts[2], file=parts[3]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "6. Labels": if len(parts) == 1: return VirtualPath(view="Labels") if len(parts) == 2: return VirtualPath(view="Labels", label=parts[1]) if len(parts) == 3: return VirtualPath(view="Labels", label=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath(view="Labels", label=parts[1], release=parts[2], file=parts[3]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "7. Collages": if len(parts) == 1: return VirtualPath(view="Collages") if len(parts) == 2: return VirtualPath(view="Collages", collage=parts[1]) if len(parts) == 3: return VirtualPath(view="Collages", collage=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath( view="Collages", collage=parts[1], release=parts[2], file=parts[3] ) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "8. Playlists": if len(parts) == 1: return VirtualPath(view="Playlists") if len(parts) == 2: return VirtualPath(view="Playlists", playlist=parts[1]) if len(parts) == 3: return VirtualPath( view="Playlists", playlist=parts[1], file=parts[2], ) raise llfuse.FUSEError(errno.ENOENT) raise llfuse.FUSEError(errno.ENOENT) class VirtualNameGenerator: """ Generates virtual dirnames and filenames for releases and tracks, and maintains an inverse mapping for looking up release/track UUIDs from their virtual paths. This object's data has the following lifecycle: 1. RoseLogicalCore calls `list_virtual_x_paths` to generate all paths in a directory. 2. Once generated, path->ID can be looked up. This means that RoseLogicalCore is responsible for invoking `list_virtual_x_paths` upon cache misses / missing file accesses. We end up invoking `list_virtual_x_path` whenever a non-existent path is getattr'ed, which is somewhat excessive, however, we can decouple the virtual templates from the cache this way, and the lookup _miss_ case should be rather rare in normal operations The VirtualNameGenerator also remembers all previous path mappings for 15 minutes since last use. This allows Rose to continue to serving accesses to old paths, even after the file metadata changed. This is useful, for example, if a directory or file is renamed (due to a metadata change) while its tracks are in a mpv playlist. mpv's requests to the old paths will still resolve, but the old paths will not show up in a readdir call. If old paths collide with new paths, new paths will take precedence. """ def __init__(self, config: Config): # fmt: off self._config = config # These are the stateful maps that we use to remember path mappings. They are maps from the # (parent_path, virtual path) -> entity ID. # # Entries expire after 15 minutes, which implements the "serve accesses to previous paths" # behavior as specified in the class docstring. self._release_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15) self._track_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15) # Cache template evaluations because they're expensive. 
self._release_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {} self._track_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {} # fmt: on def list_release_paths( self, release_parent: VirtualPath, releases: list[CachedRelease], ) -> Iterator[tuple[CachedRelease, str]]: """ Given a parent directory and a list of releases, calculates the virtual directory names for those releases, and returns a zipped iterator of the releases and their virtual directory names. """ # For collision number generation. seen: set[str] = set() prefix_pad_size = len(str(len(releases))) for idx, release in enumerate(releases): # Determine the proper template. template = None if release_parent.view == "Releases": template = self._config.path_templates.all_releases.release elif release_parent.view == "New": template = self._config.path_templates.new_releases.release elif release_parent.view == "Recently Added": template = self._config.path_templates.recently_added_releases.release elif release_parent.view == "Artists": template = self._config.path_templates.artists.release elif release_parent.view == "Genres": template = self._config.path_templates.genres.release elif release_parent.view == "Labels": template = self._config.path_templates.labels.release elif release_parent.view == "Collages": template = self._config.path_templates.collages.release else: raise RoseError(f"VNAMES: No release template found for {release_parent=}.")
""" The virtualfs module renders a virtual filesystem from the read cache. It is written in an Object-Oriented style, against my typical sensibilities, because that's how the FUSE libraries tend to be implemented. But it's OK :) Since this is a pretty hefty module, we'll cover the organization. This module contains 8 classes: 1. TTLCache: A wrapper around dict that expires key/value pairs after a given TTL. 2. VirtualPath: A semantic representation of a path in the virtual filesystem along with a parser. All virtual filesystem paths are parsed by this class into a far more ergonomic dataclass. 3. VirtualNameGenerator: A class that generates virtual directory and filenames given releases and tracks, and maintains inverse mappings for resolving release IDs from virtual paths. 4. "CanShow"er: An abstraction that encapsulates the logic of whether an artist, genre, or label should be shown in their respective virtual views, based on the whitelist/blacklist configuration parameters. 5. FileHandleGenerator: A class that keeps generates new file handles. It is a counter that wraps back to 5 when the file handles exceed ~10k, as to avoid any overflows. 6. RoseLogicalCore: A logical representation of Rose's filesystem logic, freed from the annoying implementation details that a low-level library like `llfuse` comes with. 7. INodeMapper: A class that tracks the INode <-> Path mappings. It is used to convert inodes to paths in VirtualFS. 8. VirtualFS: The main Virtual Filesystem class, which manages the annoying implementation details of a low-level virtual filesystem, and delegates logic to the above classes. It uses INodeMapper and VirtualPath to translate inodes into semantically useful dataclasses, and then passes them into RoseLogicalCore. """ from __future__ import annotations logger = logging.getLogger(__name__) K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") class TTLCache(Generic[K, V]): """ TTLCache is a dictionary with a time-to-live (TTL) for each key/value pair. After the TTL passes, the key/value pair is no longer accessible. We do not currently free entries in this cache, because we expect little churn to occur in entries in normal operation. We do not have a great time to clear the cache that does not affect performance. We will probably implement freeing entries later when we give more of a shit or someone complains about the memory usage. I happen to have a lot of free RAM! """ def __init__(self, ttl_seconds: int = 5): self.ttl_seconds = ttl_seconds self.__backing: dict[K, tuple[V, float]] = {} def __contains__(self, key: K) -> bool: try: _, insert_time = self.__backing[key] except KeyError: return False return time.time() - insert_time <= self.ttl_seconds def __getitem__(self, key: K) -> V: v, insert_time = self.__backing[key] if time.time() - insert_time > self.ttl_seconds: raise KeyError(key) return v def __setitem__(self, key: K, value: V) -> None: self.__backing[key] = (value, time.time()) def __delitem__(self, key: K) -> None: del self.__backing[key] def get(self, key: K, default: T) -> V | T: try: return self[key] except KeyError: return default # In collages, playlists, and releases, we print directories with position of the release/track in # the collage. When parsing, strip it out. Otherwise we will have to handle this parsing in every # method. POSITION_REGEX = re.compile(r"^([^.]+)\. ") # In recently added, we print the date that the release was added to the library. When parsing, # strip it out. 
ADDED_AT_REGEX = re.compile(r"^\[[\d-]{10}\] ")


@dataclass(frozen=True, slots=True)
class VirtualPath:
    view: (
        Literal[
            "Root",
            "Releases",
            "Artists",
            "Genres",
            "Labels",
            "Collages",
            "Playlists",
            "New",
            "Recently Added",
        ]
        | None
    )
    artist: str | None = None
    genre: str | None = None
    label: str | None = None
    collage: str | None = None
    playlist: str | None = None
    release: str | None = None
    file: str | None = None

    @property
    def release_parent(self) -> VirtualPath:
        """Parent path of a release: Used as an input to the VirtualNameGenerator."""
        return VirtualPath(
            view=self.view,
            artist=self.artist,
            genre=self.genre,
            label=self.label,
            collage=self.collage,
        )

    @property
    def track_parent(self) -> VirtualPath:
        """Parent path of a track: Used as an input to the VirtualNameGenerator."""
        return VirtualPath(
            view=self.view,
            artist=self.artist,
            genre=self.genre,
            label=self.label,
            collage=self.collage,
            playlist=self.playlist,
            release=self.release,
        )

    @classmethod
    def parse(cls, path: Path) -> VirtualPath:
        parts = str(path.resolve()).split("/")[1:]  # First part is always empty string.
        if len(parts) == 1 and parts[0] == "":
            return VirtualPath(view="Root")

        # Let's abort early if we recognize a path that we _know_ is not valid. This is because
        # invalid file accesses trigger a recalculation of virtual file paths, which we decided to
        # do under the assumption that invalid file accesses would be _rare_. That's not true if we
        # keep getting requests for these stupid paths from shell plugins.
        if parts[-1] in [".git", ".DS_Store", ".Trash", ".Trash-1000", "HEAD", ".envrc"]:
            logger.debug(
                f"Raising ENOENT early in the VirtualPath parser because last path part {parts[-1]} in blacklist."
            )
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "1. Releases":
            if len(parts) == 1:
                return VirtualPath(view="Releases")
            if len(parts) == 2:
                return VirtualPath(view="Releases", release=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Releases", release=parts[1], file=parts[2])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "2. Releases - New":
            if len(parts) == 1:
                return VirtualPath(view="New")
            if len(parts) == 2:
                return VirtualPath(view="New", release=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="New", release=parts[1], file=parts[2])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "3. Releases - Recently Added":
            if len(parts) == 1:
                return VirtualPath(view="Recently Added")
            if len(parts) == 2:
                return VirtualPath(view="Recently Added", release=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Recently Added", release=parts[1], file=parts[2])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "4. Artists":
            if len(parts) == 1:
                return VirtualPath(view="Artists")
            if len(parts) == 2:
                return VirtualPath(view="Artists", artist=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Artists", artist=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(view="Artists", artist=parts[1], release=parts[2], file=parts[3])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "5. Genres":
            if len(parts) == 1:
                return VirtualPath(view="Genres")
            if len(parts) == 2:
                return VirtualPath(view="Genres", genre=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Genres", genre=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(view="Genres", genre=parts[1], release=parts[2], file=parts[3])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "6. Labels":
            if len(parts) == 1:
                return VirtualPath(view="Labels")
            if len(parts) == 2:
                return VirtualPath(view="Labels", label=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Labels", label=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(view="Labels", label=parts[1], release=parts[2], file=parts[3])
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "7. Collages":
            if len(parts) == 1:
                return VirtualPath(view="Collages")
            if len(parts) == 2:
                return VirtualPath(view="Collages", collage=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Collages", collage=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(
                    view="Collages", collage=parts[1], release=parts[2], file=parts[3]
                )
            raise llfuse.FUSEError(errno.ENOENT)

        if parts[0] == "8. Playlists":
            if len(parts) == 1:
                return VirtualPath(view="Playlists")
            if len(parts) == 2:
                return VirtualPath(view="Playlists", playlist=parts[1])
            if len(parts) == 3:
                return VirtualPath(
                    view="Playlists",
                    playlist=parts[1],
                    file=parts[2],
                )
            raise llfuse.FUSEError(errno.ENOENT)

        raise llfuse.FUSEError(errno.ENOENT)


class VirtualNameGenerator:
    """
    Generates virtual dirnames and filenames for releases and tracks, and maintains an inverse
    mapping for looking up release/track UUIDs from their virtual paths.

    This object's data has the following lifecycle:

    1. RoseLogicalCore calls `list_virtual_x_paths` to generate all paths in a directory.
    2. Once generated, path->ID can be looked up.

    This means that RoseLogicalCore is responsible for invoking `list_virtual_x_paths` upon cache
    misses / missing file accesses. We end up invoking `list_virtual_x_path` whenever a
    non-existent path is getattr'ed, which is somewhat excessive, however, we can decouple the
    virtual templates from the cache this way, and the lookup _miss_ case should be rather rare
    in normal operations

    The VirtualNameGenerator also remembers all previous path mappings for 15 minutes since last
    use. This allows Rose to continue to serving accesses to old paths, even after the file
    metadata changed. This is useful, for example, if a directory or file is renamed (due to a
    metadata change) while its tracks are in a mpv playlist. mpv's requests to the old paths will
    still resolve, but the old paths will not show up in a readdir call. If old paths collide
    with new paths, new paths will take precedence.
    """

    def __init__(self, config: Config):
        # fmt: off
        self._config = config
        # These are the stateful maps that we use to remember path mappings. They are maps from the
        # (parent_path, virtual path) -> entity ID.
        #
        # Entries expire after 15 minutes, which implements the "serve accesses to previous paths"
        # behavior as specified in the class docstring.
        self._release_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15)
        self._track_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15)
        # Cache template evaluations because they're expensive.
        self._release_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {}
        self._track_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {}
        # fmt: on

    def list_release_paths(
        self,
        release_parent: VirtualPath,
        releases: list[CachedRelease],
    ) -> Iterator[tuple[CachedRelease, str]]:
        """
        Given a parent directory and a list of releases, calculates the virtual directory names
        for those releases, and returns a zipped iterator of the releases and their virtual
        directory names.
        """
        # For collision number generation.
        seen: set[str] = set()
        prefix_pad_size = len(str(len(releases)))
        for idx, release in enumerate(releases):
            # Determine the proper template.
            template = None
            if release_parent.view == "Releases":
                template = self._config.path_templates.all_releases.release
            elif release_parent.view == "New":
                template = self._config.path_templates.new_releases.release
            elif release_parent.view == "Recently Added":
                template = self._config.path_templates.recently_added_releases.release
            elif release_parent.view == "Artists":
                template = self._config.path_templates.artists.release
            elif release_parent.view == "Genres":
                template = self._config.path_templates.genres.release
            elif release_parent.view == "Labels":
                template = self._config.path_templates.labels.release
            elif release_parent.view == "Collages":
                template = self._config.path_templates.collages.release
            else:
                raise RoseError(f"VNAMES: No release template found for {release_parent=}.")
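For orientation, the two stores above are keyed TTL caches with a 15-minute expiry. The sketch below is only an assumption about the interface the snippet implies (a `ttl_seconds` constructor argument and dict-style access); it is not the repository's actual TTLCache implementation.

# Minimal TTL-keyed mapping sketch (hypothetical; requires Python 3.10+ for the union syntax).
import time
from typing import Generic, Hashable, TypeVar

K = TypeVar("K", bound=Hashable)
V = TypeVar("V")


class TTLCache(Generic[K, V]):
    """Map whose entries expire `ttl_seconds` after their last set/get."""

    def __init__(self, ttl_seconds: float) -> None:
        self.ttl_seconds = ttl_seconds
        self._data: dict[K, tuple[float, V]] = {}

    def __setitem__(self, key: K, value: V) -> None:
        self._data[key] = (time.monotonic(), value)

    def __getitem__(self, key: K) -> V:
        stamp, value = self._data[key]
        if time.monotonic() - stamp > self.ttl_seconds:
            # Entry is stale: drop it and behave like a missing key.
            del self._data[key]
            raise KeyError(key)
        # Refresh the timestamp so recently used paths stay resolvable.
        self._data[key] = (time.monotonic(), value)
        return value

    def get(self, key: K, default: V | None = None) -> V | None:
        try:
            return self[key]
        except KeyError:
            return default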
logtext = calculate_release_logtext(
6
2023-10-09 14:42:23+00:00
24k
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpbeh_roboverse.py
[ { "identifier": "RcslModule", "path": "offlinerlkit/modules/rcsl_module.py", "snippet": "class RcslModule(nn.Module):\n '''\n rcsl policy network\n '''\n def __init__(\n self,\n backbone: nn.Module,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n\n def forward(self, obs: Union[np.ndarray, torch.Tensor], rtg: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:\n '''\n obs: (batch, obs_dim) \n rtg: (batch,) / (batch,1)\n '''\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n rtg = torch.as_tensor(rtg, device=self.device, dtype=torch.float32)\n if rtg.dim() == 1:\n rtg = rtg.unsqueeze(-1)\n in_tensor = torch.cat([obs,rtg], dim=-1) #(batch, obs_dim + 1)\n action = self.backbone(in_tensor)\n return action" }, { "identifier": "TransformerDynamicsModel", "path": "offlinerlkit/modules/transformer_dynamics_module.py", "snippet": "class TransformerDynamicsModel(nn.Module):\n def __init__(\n self, obs_dim, act_dim, obs_min, obs_max, act_min, act_max,\n r_min, r_max,\n ckpt_dir: str, device=\"cpu\",\n n_layer = 4, n_head= 4, n_embd = 32\n ):\n super().__init__()\n self._obs_dim = obs_dim\n self._act_dim = act_dim\n self.device = device\n # self.logger = logger\n\n block_size = (\n self._obs_dim + self._act_dim + self._obs_dim + 1 - 1\n ) # -1 since only need n-1 autoregressive steps, + 1 for reward\n vocab_size = 500\n\n conf = GPTConfig(\n vocab_size=vocab_size,\n block_size=block_size,\n discrete_dim=self._obs_dim + self._act_dim + self._obs_dim + 1,\n n_layer=n_layer,\n n_head=n_head,\n n_embd=n_embd * n_head,\n savepath=ckpt_dir,\n )\n\n self._model = conf.make(self.device)\n self._target_model = copy.deepcopy(self._model)\n self._obs_discretizer = Discretizer(obs_min, obs_max, vocab_size)\n self._act_discretizer = Discretizer(act_min, act_max, vocab_size)\n self._r_discretizer = Discretizer(r_min, r_max, vocab_size)\n\n def configure_optimizer(self, lr, weight_decay, betas):\n return self._model.configure_optimizer(lr, weight_decay, betas)\n\n def fit(self, obs_act, r_next_obs, weight):\n # State marginal conditioned on the initial state\n obs = obs_act[:, :self._obs_dim]\n act = obs_act[:, self._obs_dim:]\n next_obs = r_next_obs[:, 1:]\n rew = r_next_obs[:, :1]\n obs_discrete, obs_recon, obs_error = self._obs_discretizer(obs)\n act_discrete, act_recon, act_error = self._act_discretizer(act)\n next_obs_discrete, _ , _ = self._obs_discretizer(next_obs)\n rew_discrete, _, _ = self._r_discretizer(rew)\n target_discrete = torch.cat([rew_discrete, next_obs_discrete], dim = -1)\n input_discrete = torch.cat([obs_discrete, act_discrete], dim = -1)\n logits, loss_p = self._model(input_discrete, targets=target_discrete)\n return loss_p\n\n @torch.no_grad()\n def sample(self, obs, act, temperature=1.0, top_k=None):\n # Discretize observation\n obs = self._obs_discretizer.discretize(obs)\n act = self._act_discretizer.discretize(act)\n\n batch_size = len(obs)\n total_probs = torch.zeros(batch_size, device=self.device)\n block_size = self._model.get_block_size()\n self._model.eval()\n\n x = np.concatenate([obs, act], axis= -1)\n x = torch.as_tensor(x).to(self.device)\n for k in range(self._obs_dim + 1):\n x_cond = x\n if x_cond.shape[1] > block_size:\n raise RuntimeError(\"Sequence length greater than block size\")\n logits, _ = self._model(x_cond)\n # Pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n probs_for_calc = F.softmax(logits, dim=-1)\n if 
top_k is not None:\n logits = top_k_logits(logits, top_k)\n # Apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n # Sample next token\n ix = torch.multinomial(probs, num_samples=1)\n # Compute conditional probability\n pr = probs_for_calc[torch.arange(batch_size), ix.squeeze()]\n terminated = (ix == self._model.vocab_size).squeeze()\n pr[terminated] = -10\n total_probs += pr\n # Append action to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n # Reconstruct next_obs\n next_obs = x[:, -self._obs_dim:]\n rew = x[:, -self._obs_dim-1:-self._obs_dim]\n next_obs = self._obs_discretizer.reconstruct_torch(next_obs)\n rew = self._r_discretizer.reconstruct_torch(rew)\n return next_obs, rew, total_probs" }, { "identifier": "TransformerDynamics", "path": "offlinerlkit/dynamics/transformer_dynamics.py", "snippet": "class TransformerDynamics(BaseDynamics):\n def __init__(\n self,\n model: TransformerDynamicsModel,\n optim: torch.optim.Optimizer,\n ) -> None:\n super().__init__(model, optim)\n\n @ torch.no_grad()\n def step(\n self,\n obs: np.ndarray,\n action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:\n '''\n Return:\n reward (B,1) (if obs has batch)\n terminal (B,1)\n '''\n \"imagine single forward step\"\n next_obs, reward, _ = self.model.sample(obs, action) # (batch, obs_dim + 1) [reward, obs]\n\n next_obs = next_obs.cpu().numpy()\n reward = reward.cpu().numpy()\n\n terminal = np.array([False for _ in range(reward.shape[0])])\n \n return next_obs, reward, terminal, {}\n\n def format_samples_for_training(self, data: Dict) -> Tuple[np.ndarray, np.ndarray]:\n obss = data[\"observations\"]\n actions = data[\"actions\"]\n next_obss = data[\"next_observations\"]\n rewards = data[\"rewards\"]\n rewards = rewards.reshape(rewards.shape[0], -1)\n inputs = np.concatenate((obss, actions), axis=-1)\n targets = np.concatenate((rewards, next_obss), axis=-1) # estimate reward first\n if 'weights' in data:\n weights = data['weights']\n weights = weights.reshape(weights.shape[0], -1) # (N,1)\n else:\n weights = None\n return inputs, targets, weights\n \n def train(\n self,\n data: Dict,\n logger: Logger,\n max_epochs: int = 80,\n batch_size: int = 256,\n holdout_ratio: float = 0.2,\n ) -> None:\n inputs, targets, weights = self.format_samples_for_training(data)\n data_size = inputs.shape[0]\n holdout_size = min(int(data_size * holdout_ratio), 1000)\n train_size = data_size - holdout_size\n train_splits, holdout_splits = torch.utils.data.random_split(range(data_size), (train_size, holdout_size))\n train_inputs, train_targets = inputs[train_splits.indices], targets[train_splits.indices]\n holdout_inputs, holdout_targets = inputs[holdout_splits.indices], targets[holdout_splits.indices]\n if weights is not None:\n train_weights, holdout_weights = weights[train_splits.indices], weights[holdout_splits.indices]\n else: \n train_weights, holdout_weights = None, None\n\n data_idxes = np.arange(train_size)\n np.random.shuffle(data_idxes)\n\n epoch = 0\n logger.log(\"Training dynamics:\")\n while True:\n epoch += 1\n if train_weights is not None:\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], train_weights[data_idxes], batch_size)\n else:\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], None, batch_size)\n new_holdout_loss = self.validate(holdout_inputs, holdout_targets, holdout_weights)\n logger.logkv(\"loss/dynamics_train_loss\", train_loss)\n logger.logkv(\"loss/dynamics_holdout_loss\", 
new_holdout_loss)\n logger.set_timestep(epoch)\n logger.dumpkvs(exclude=[\"policy_training_progress\"])\n\n np.random.shuffle(data_idxes)\n \n if epoch >= max_epochs:\n break\n\n self.save(logger.model_dir)\n self.model.eval()\n \n def learn(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n weights: Optional[np.ndarray],\n batch_size: int = 256,\n ) -> float:\n '''\n inputs, targets: (N, dim). N is sampled with replacement\n weights: None / (N, 1)\n '''\n self.model.train()\n assert inputs.ndim == 2, f\"{inputs.shape}\"\n train_size = inputs.shape[0]\n losses = []\n\n for batch_num in range(int(np.ceil(train_size / batch_size))):\n inputs_batch = inputs[batch_num * batch_size:(batch_num + 1) * batch_size]\n inputs_batch = torch.as_tensor(inputs_batch).type(torch.float32).to(self.model.device)\n targets_batch = targets[batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = torch.as_tensor(targets_batch).type(torch.float32).to(self.model.device)\n if weights is not None:\n weights_batch = weights[batch_num * batch_size:(batch_num + 1) * batch_size]\n weights_batch = torch.as_tensor(weights_batch).type(torch.float32).to(self.model.device)\n else:\n weights_batch is None\n \n loss = self.model.fit(inputs_batch, targets_batch, weights_batch)\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n losses.append(loss.item())\n return np.mean(losses)\n \n @ torch.no_grad()\n def validate(self, inputs: np.ndarray, targets: np.ndarray, weights: Optional[np.ndarray]) -> float:\n inputs = torch.as_tensor(inputs).type(torch.float32).to(self.model.device)\n targets = torch.as_tensor(targets).type(torch.float32).to(self.model.device)\n if weights is not None:\n weights = torch.as_tensor(weights).type(torch.float32).to(self.model.device)\n else:\n weights = None\n val_loss = self.model.fit(inputs, targets, weights)\n return val_loss.item()\n \n\n def save(self, save_path: str) -> None:\n torch.save(self.model.state_dict(), os.path.join(save_path, \"dynamics.pth\"))\n \n def load(self, load_path: str) -> None:\n '''\n load_type: 'all', 'obs', 'r'\n '''\n self.model.load_state_dict(torch.load(os.path.join(load_path, \"dynamics.pth\"), map_location=self.model.device))" }, { "identifier": "PickPlaceObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())" }, { "identifier": "DoubleDrawerObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = 
tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)" }, { "identifier": "get_pickplace_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_pickplace_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n init_obss.append(get_pickplace_obs(obs_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n elif key == 'next_observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "get_doubledrawer_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_doubledrawer_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. 
Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n info_list = d['env_infos']\n init_obss.append(get_doubledrawer_obs(obs_list[0], info_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n info_list = d['env_infos']\n # initial info is similar to step 1\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, [info_list[0]] + info_list[:-1])]\n elif key == 'next_observations':\n info_list = d['env_infos']\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, info_list)]\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n 
handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" }, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "RcslPolicyTrainer", "path": "offlinerlkit/policy_trainer/rcsl_policy_trainer.py", "snippet": "class RcslPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: Union[gym.Env, gymnasium.Env],\n offline_dataset: Dict[str, np.ndarray],\n rollout_dataset: Optional[Dict[str, np.ndarray]],\n goal: float,\n logger: Logger,\n seed,\n eval_env2: Optional[Union[gym.Env, gymnasium.Env]] = None,\n epoch: int = 1000,\n batch_size: int = 256,\n offline_ratio: float = 0,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False,\n binary_return = True\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.eval_env = eval_env\n self.eval_env2 = eval_env2\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.rollout_dataset = rollout_dataset\n self.goal = goal\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self._offline_ratio = offline_ratio\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n 
self.env_seed = seed\n self.binary_return = binary_return\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n\n def train(self, holdout_ratio: float = 0.1, last_eval = False, find_best_start: Optional[int] = None, improve_threshold: float = 0.01) -> Dict[str, float]:\n '''\n last_eval: If True, only evaluates at the last epoch\n find_best_start: If >=0, begin to find the best epoch by holdout loss\n '''\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n dataset = DictDataset(self.offline_dataset)\n\n if holdout_ratio == 0.:\n has_holdout = False\n train_dataset = dataset\n else:\n has_holdout = True\n holdout_size = int(len(dataset) * holdout_ratio)\n train_size = len(dataset) - holdout_size\n train_dataset, holdout_dataset = torch.utils.data.random_split(dataset, [train_size, holdout_size], \n generator=torch.Generator().manual_seed(self.env_seed))\n data_loader = DataLoader(\n train_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n best_policy_dict = self.policy.state_dict()\n best_holdout_loss = 1e10\n epochs_since_upd = 0\n stop_by_holdout = (find_best_start is not None)\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n # Test validation loss\n if has_holdout:\n holdout_loss = self.validate(holdout_dataset)\n if stop_by_holdout and e >= find_best_start: # test holdout improvement\n if (best_holdout_loss - holdout_loss) / best_holdout_loss > improve_threshold:\n best_holdout_loss = holdout_loss\n best_policy_dict = deepcopy(self.policy.state_dict())\n epochs_since_upd = 0\n else:\n epochs_since_upd += 1\n\n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_reward_max, ep_reward_min = np.max(eval_info[\"eval/episode_reward\"]), np.min(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n\n if not hasattr(self.eval_env, \"get_normalized_score\"): # gymnasium_env does not have normalized score\n last_10_performance.append(ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std) \n else: \n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std = self.eval_env.get_normalized_score(ep_reward_std) * 100\n norm_ep_rew_max = self.eval_env.get_normalized_score(ep_reward_max) * 100\n norm_ep_rew_min = self.eval_env.get_normalized_score(ep_reward_min) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n 
self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/normalized_episode_reward_max\", norm_ep_rew_max)\n self.logger.logkv(\"eval/normalized_episode_reward_min\", norm_ep_rew_min)\n self.logger.logkv(\"eval/episode_length\", ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n\n if stop_by_holdout and epochs_since_upd >= 5: # Stop, evaluate for the last time\n self.policy.load_state_dict(best_policy_dict)\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n self.logger.log(f\"Final evaluation: Mean {ep_reward_mean}, std {ep_reward_std}\\n\")\n break\n \n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy_final.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self, eval_episodes: int = -1) -> Dict[str, List[float]]:\n '''\n Always set desired rtg to 0\n '''\n # Pointmaze obs has different format, needs to be treated differently\n if eval_episodes == -1:\n real_eval_episodes = self._eval_episodes\n else:\n real_eval_episodes = eval_episodes\n is_gymnasium_env = self.is_gymnasium_env\n\n self.eval_env.reset(seed=self.env_seed) # Fix seed\n \n self.policy.eval()\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if not self.has_terminal: # don't use terminal signal, terminate when reach horizon\n while num_episodes < real_eval_episodes:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n for timestep in range(self.horizon): # One epoch\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, info = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n rtg = rtg - reward\n episode_length += 1\n\n obs = next_obs\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n while num_episodes < self._eval_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n 
eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }\n \n @ torch.no_grad()\n def validate(self, holdout_dataset: torch.utils.data.Dataset) -> Optional[float]:\n data_loader = DataLoader(\n holdout_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n self.policy.eval()\n\n pbar = tqdm(enumerate(data_loader), total=len(data_loader))\n losses = []\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n '''\n loss_dict = self.policy.validate(batch)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n\n if \"holdout_loss\" in loss_dict:\n loss = loss_dict[\"holdout_loss\"]\n losses.append(loss)\n\n if len(losses) > 0:\n return(sum(losses) / len(losses))\n else:\n return None" }, { "identifier": "DiffusionPolicyTrainer", "path": "offlinerlkit/policy_trainer/diffusion_policy_trainer.py", "snippet": "class DiffusionPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n offline_dataset: Dict[str, np.ndarray],\n logger: Logger,\n seed,\n epoch: int = 25,\n batch_size: int = 256,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n self.env_seed = seed\n self.has_terminal = has_terminal\n\n def train(self) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n data_loader = DataLoader(\n DictDataset(self.offline_dataset),\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n ) \n\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}" }, { "identifier": "none_or_str", "path": 
"offlinerlkit/utils/none_or_str.py", "snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value" }, { "identifier": "SimpleDiffusionPolicy", "path": "offlinerlkit/policy/diffusion/simple_diffusion.py", "snippet": "class SimpleDiffusionPolicy(ConditionalDiffusionModel):\n '''\n Note: When loading DiffusionPolicy, also need to load scaler manually\n '''\n def __init__(\n self,\n obs_shape,\n act_shape,\n feature_dim,\n num_training_steps,\n num_diffusion_steps,\n device,\n **kwargs,\n ):\n super().__init__(\n input_dim=np.prod(act_shape),\n cond_shape_dict={\"obs\": obs_shape, \"feat\": (feature_dim,)},\n num_training_steps=num_training_steps,\n num_diffusion_steps=num_diffusion_steps,\n clip_sample=True,\n device=device,\n **kwargs,\n )\n\n def learn(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().learn(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def validate(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().validate(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def select_action(self, obs, feat):\n # print(f\"DiffusionPolicy: select action with obs shape {obs.shape}, feat(rtg) shape {feat.shape}\")\n obs = torch.as_tensor(obs, dtype = torch.float32, device = self.device)\n feat = torch.as_tensor(feat, dtype = torch.float32, device = self.device)\n\n with torch.no_grad():\n action = super().sample({\"obs\": obs, \"feat\": feat})\n # print(action)\n return action.cpu().numpy()\n\n def train(self) -> None:\n self.noise_pred_net.train()\n self.cond_encoders.train()\n\n def eval(self) -> None:\n self.noise_pred_net.eval()\n self.cond_encoders.eval()" }, { "identifier": "AutoregressivePolicy", "path": "offlinerlkit/policy/rcsl/rcsl_autoregressive.py", "snippet": "class AutoregressivePolicy(nn.Module):\n def __init__(self, obs_dim, act_dim, hidden_dims, lr, device):\n super().__init__()\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n\n # Input is obs + act + one-hot for the predicted dimension\n # Output is the mean and standard deviation of the predicted dimension\n input_dim = obs_dim + 1 + act_dim + act_dim # also depend on return\n all_dims = [input_dim] + hidden_dims + [2]\n self.model = nn.ModuleList()\n for in_dim, out_dim in zip(all_dims[:-1], all_dims[1:]):\n self.model.append(nn.Linear(in_dim, out_dim))\n self.model.append(nn.LeakyReLU())\n\n self.rcsl_optim = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.device = device\n self.register_parameter(\n \"max_logstd\",\n nn.Parameter(torch.ones(1) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logstd\",\n nn.Parameter(torch.ones(1) * -10, requires_grad=True)\n )\n self.to(self.device)\n\n def forward(self, obs, rtg, deterministic: bool = False):\n batch_size = obs.size(0)\n rtg = rtg.reshape(batch_size, 1)\n\n # 
Initialize action to zeros\n act = torch.zeros((batch_size, self.act_dim), device=obs.device)\n\n # One-hot encoding for all dimensions\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n\n # Predict each dimension autoregressively\n for i in range(self.act_dim):\n one_hot = one_hot_all[i][None, :].repeat(batch_size, 1)\n x = torch.cat([obs, rtg, act, one_hot], dim=1)\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n\n # logstd might be too small\n if deterministic:\n next_dim = mean\n else:\n assert logstd.exp() != float('nan'), f\"{logstd}\"\n if logstd.exp() == 0:\n next_dim = mean\n else:\n dist = Normal(mean, logstd.exp())\n next_dim = dist.sample()\n act = torch.cat([act[:, :i], next_dim, act[:, i + 1 :]], dim=1)\n\n return act\n\n def select_action(self, obs: np.ndarray, rtg: np.ndarray, deterministic: bool = False) -> np.ndarray:\n with torch.no_grad():\n obs = torch.tensor(obs, dtype=torch.float32).to(self.device)\n rtg = torch.as_tensor(rtg).type(torch.float32).to(self.device)\n action = self.forward(obs, rtg, deterministic)\n return action.cpu().numpy()\n\n def fit(self, obs, rtg, act, weights = None):\n batch_size = obs.size(0)\n\n # Generate all the one-hot vectors, expand by repeat\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n one_hot_full = one_hot_all.repeat_interleave(batch_size, dim=0)\n\n # Repeat act by act_dim times and mask by one-hot encoding\n mask = (\n torch.tril(torch.ones((self.act_dim, self.act_dim), device=obs.device))\n - one_hot_all\n ) # lower trig - diag\n mask_full = mask.repeat_interleave(batch_size, dim=0)\n act_full = act.repeat(self.act_dim, 1) # (batch*act_dim, act_dim)\n act_masked = act_full * mask_full\n\n # Repeat obs by act_dim times\n rtg = rtg.reshape(batch_size, 1)\n obs_rtg = torch.cat([obs, rtg], dim = 1)\n obs_rtg_full = obs_rtg.repeat(self.act_dim, 1)\n\n # Concatenate everything to get input\n input_full = torch.cat([obs_rtg_full, act_masked, one_hot_full], dim=1)\n\n # Use the one-hot vector as boolean mask to get target\n target = act_full[one_hot_full.bool()].unsqueeze(1)\n\n # Forward through model and compute loss\n x = input_full\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n if any(torch.isnan(mean)):\n torch.save(self.model.state_dict(), \"model_debug.pth\")\n torch.save(input_full, \"input_debug.pth\")\n raise Exception(f\"Mean is nan, input_full {input_full.detach().cpu().numpy()}\")\n dist = Normal(mean, logstd.exp())\n loss = -dist.log_prob(target)\n if weights is None:\n loss = loss.mean()\n else:\n loss = loss.reshape(loss.shape[0], -1) # (batch * act_dim, 1)\n weights = weights.reshape(weights.shape[0], -1) # (batch, 1)\n weights = weights.repeat(self.act_dim, 1) # (batch * act_dim, 1)\n loss = torch.sum(loss * weights) / (torch.sum(weights) * loss.shape[-1])\n return loss\n \n def learn(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n loss = self.fit(obss, rtgs, actions,weights)\n\n self.rcsl_optim.zero_grad()\n loss.backward()\n 
self.rcsl_optim.step()\n\n result = {\n \"loss\": loss.item(),\n }\n \n return result\n\n def validate(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n with torch.no_grad():\n loss = self.fit(obss, rtgs, actions, weights)\n return {\n \"holdout_loss\": loss.item()\n }" }, { "identifier": "RcslPolicy", "path": "offlinerlkit/policy/rcsl/rcsl_mlp.py", "snippet": "class RcslPolicy(BasePolicy):\n \"\"\"\n wrapped rcsl policy\n \"\"\"\n\n def __init__(\n self,\n rcsl: RcslModule,\n rcsl_optim: torch.optim.Optimizer,\n device: Union[str, torch.device]\n ) -> None:\n super().__init__()\n\n self.rcsl = rcsl \n self.rcsl_optim = rcsl_optim\n\n self.device = device\n \n # One batch update\n def learn(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n\n pred_actions = self.rcsl.forward(obss, rtgs)\n # Average over batch and dim, sum over ensembles.\n loss = torch.pow(pred_actions - actions.to(pred_actions.device), 2).mean() # MSE error\n\n self.rcsl_optim.zero_grad()\n loss.backward()\n self.rcsl_optim.step()\n\n result = {\n \"loss\": loss.item(),\n }\n \n return result\n\n @ torch.no_grad()\n def validate(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n\n pred_actions = self.rcsl.forward(obss, rtgs)\n # Average over batch and dim, sum over ensembles.\n loss = torch.pow(pred_actions - actions.to(pred_actions.device), 2).mean() # MSE error\n\n result = {\n \"holdout_loss\": loss.item(),\n }\n \n return result\n \n def select_action(self, obs: np.ndarray, rtg: np.ndarray) -> np.ndarray:\n with torch.no_grad():\n action = self.rcsl.forward(obs, rtg)\n return action.cpu().numpy()\n \n def train(self) -> None:\n self.rcsl.train()\n\n def eval(self) -> None:\n self.rcsl.eval()" }, { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropout_rate: Optional[float] = None,\n init_last: bool = False\n ) -> None:\n super().__init__()\n hidden_dims = [input_dim] + list(hidden_dims)\n model = []\n for in_dim, out_dim in zip(hidden_dims[:-1], hidden_dims[1:]):\n model += [nn.Linear(in_dim, out_dim), activation()]\n if dropout_rate is not None:\n model += [nn.Dropout(p=dropout_rate)]\n\n self.output_dim = hidden_dims[-1]\n if output_dim is not None:\n last_layer = nn.Linear(hidden_dims[-1], output_dim)\n if init_last:\n nn.init.xavier_uniform_(last_layer.weight, gain=1e-2)\n nn.init.constant_(last_layer.bias, 0.0)\n model += [last_layer]\n self.output_dim = output_dim\n self.model = nn.Sequential(*model)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)" } ]
import numpy as np
import torch
import roboverse
import argparse
import os
import random
import pickle
import datetime
from copy import deepcopy
from typing import Dict, Tuple
from collections import defaultdict

from offlinerlkit.modules import TransformerDynamicsModel, RcslModule
from offlinerlkit.dynamics import TransformerDynamics
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer
from offlinerlkit.utils.none_or_str import none_or_str
from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy, RcslPolicy
from offlinerlkit.nets import MLP
15,345
        max_rewards = np.maximum(max_rewards, rewards.flatten())  # Update max reward
        acc_returns = acc_returns + rewards.flatten()
        observations = deepcopy(next_observations)

    for k, v in rollout_transitions.items():
        rollout_transitions[k] = np.concatenate(v, axis=0)

    traj_idxs = rollout_transitions["traj_idxs"]
    rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"]
    # rtgs = returns[traj_idxs]
    rollout_transitions["rtgs"] = rtgs[..., None]  # (N,1)

    return rollout_transitions, \
        {"num_transitions": num_transitions,
         "reward_mean": rewards_arr.mean(),
         "returns": returns,
         "max_rewards": max_rewards,
         "rewards_full": rewards_full}


def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pickplace':
        env = roboverse.make('Widow250PickTray-v0')
        env = PickPlaceObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
        task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")
        diff_dataset, _ = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledraweropen':
        env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawercloseopen':
        env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawerpickplaceopen':
        env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    else:
        raise NotImplementedError

    env.reset(seed=args.seed)

    timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S")
    exp_name = f"timestamp_{timestamp}&{args.seed}"
    log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="dynamics")
    # key: output file name, value: output handler type
    print(f"Logging dynamics to {log_dirs}")
    output_config = {
        "consoleout_backup": "stdout",
        "policy_training_progress": "csv",
        "dynamics_training_progress": "csv",
        "tb": "tensorboard"
    }
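The `output_config` mapping built at the end of the cropped code is consumed by `Logger` together with `make_log_dirs`, both quoted in the context entries above. A minimal usage sketch follows; it assumes the repository's `offlinerlkit` package is importable, and the task, algorithm, and metric values are illustrative only.

# Usage sketch for Logger / make_log_dirs as defined in the quoted context (illustrative values).
from offlinerlkit.utils.logger import Logger, make_log_dirs

output_config = {
    "consoleout_backup": "stdout",            # plain-text mirror of stdout
    "policy_training_progress": "csv",        # per-step policy metrics
    "dynamics_training_progress": "csv",      # per-epoch dynamics metrics
    "tb": "tensorboard",                      # TensorBoard event files
}

log_dirs = make_log_dirs("pickplace", "mbrcsl_mlpbeh", "demo_exp", {}, part="dynamics")
logger = Logger(log_dirs, output_config)
logger.logkv("loss/dynamics_train_loss", 0.123)   # record one diagnostic value
logger.set_timestep(1)                            # attach it to a timestep
logger.dumpkvs(exclude=["policy_training_progress"])
logger.close()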
# Behavior policy ablation study

'''
Recommended hyperparameters:
pickplace, horizon=40, behavior_epoch=30
doubledraweropen, horizon=50, behavior_epoch=40
doubledrawercloseopen, horizon=80, behavior_epoch=40
'''

def get_args():
    parser = argparse.ArgumentParser()
    # general
    parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpbeh")
    parser.add_argument("--task", type=str, default="pickplace", help="task name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--last_eval", action="store_false")

    # env config
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")

    # transformer_autoregressive dynamics
    parser.add_argument("--n_layer", type=int, default=4)
    parser.add_argument("--n_head", type=int, default=4)
    parser.add_argument("--n_embd", type=int, default=32)
    parser.add_argument("--dynamics_lr", type=float, default=1e-3)
    parser.add_argument("--dynamics_epoch", type=int, default=80)
    parser.add_argument("--load_dynamics_path", type=none_or_str, default=None)

    # Behavior policy
    parser.add_argument("--behavior_epoch", type=int, default=30)
    parser.add_argument('--behavior_batch', type=int, default=256)
    parser.add_argument('--load_diffusion_path', type=none_or_str, default=None)
    parser.add_argument('--task_weight', type=float, default=1.4,
                        help="Weight on task data when training diffusion policy")
    parser.add_argument('--sample_ratio', type=float, default=0.8,
                        help="Use (sample_ratio * num_total_data) data to train diffusion policy")
    parser.add_argument("--behavior_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--behavior_lr", type=float, default=1e-3)
    parser.add_argument("--behavior_weight_decay", type=float, default=0.1)

    # Rollout
    parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None,
                        help="file dir, used to load/store rollout trajs")
    parser.add_argument('--rollout_epoch', type=int, default=1000,
                        help="Max number of epochs to rollout the policy")
    parser.add_argument('--num_need_traj', type=int, default=5000,
                        help="Needed valid trajs in rollout")
    parser.add_argument("--rollout-batch", type=int, default=200,
                        help="Number of trajs to be sampled at one time")

    # RCSL policy (mlp)
    parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--rcsl_lr", type=float, default=1e-3)
    parser.add_argument("--rcsl_batch", type=int, default=256)
    parser.add_argument("--rcsl_epoch", type=int, default=100)
    parser.add_argument("--eval_episodes", type=int, default=100)
    parser.add_argument("--holdout_ratio", type=float, default=0.2)

    return parser.parse_args()


def rollout_simple(
    init_obss: np.ndarray,
    dynamics: TransformerDynamicsModel,
    rollout_policy: SimpleDiffusionPolicy,
    rollout_length: int
) -> Tuple[Dict[str, np.ndarray], Dict]:
    '''
    Only serves for non-terminal cases
    Sample a batch of trajectories at the same time.
    Output rollout_transitions contain keys:
    obss,
    next_obss,
    actions
    rewards, (N,1)
    rtgs, (N,1)
    traj_idxs, (N)
    '''
    num_transitions = 0
    rewards_arr = np.array([])
    rollout_transitions = defaultdict(list)
    batch_size = init_obss.shape[0]
    valid_idxs = np.arange(init_obss.shape[0])  # maintain current valid trajectory indexes
    returns = np.zeros(init_obss.shape[0])  # maintain return of each trajectory
    acc_returns = np.zeros(init_obss.shape[0])  # maintain accumulated return of each valid trajectory
    max_rewards = np.zeros(init_obss.shape[0])  # maintain max reward seen in trajectory
    rewards_full = np.zeros((init_obss.shape[0], rollout_length))  # full rewards (batch, H)

    # rollout
    observations = init_obss
    goal = np.zeros((init_obss.shape[0], 1), dtype=np.float32)
    for t in range(rollout_length):
        actions = rollout_policy.select_action(observations, goal)
        next_observations, rewards, terminals, info = dynamics.step(observations, actions)
        rollout_transitions["observations"].append(observations)
        rollout_transitions["next_observations"].append(next_observations)
        rollout_transitions["actions"].append(actions)
        rollout_transitions["rewards"].append(rewards)
        rollout_transitions["terminals"].append(terminals)
        rollout_transitions["traj_idxs"].append(valid_idxs)
        rollout_transitions["acc_rets"].append(acc_returns)

        rewards = rewards.reshape(batch_size)  # (B)
        rewards_full[:, t] = rewards

        num_transitions += len(observations)
        rewards_arr = np.append(rewards_arr, rewards.flatten())
        returns = returns + rewards.flatten()  # Update return (for valid idxs only)
        max_rewards = np.maximum(max_rewards, rewards.flatten())  # Update max reward
        acc_returns = acc_returns + rewards.flatten()
        observations = deepcopy(next_observations)

    for k, v in rollout_transitions.items():
        rollout_transitions[k] = np.concatenate(v, axis=0)

    traj_idxs = rollout_transitions["traj_idxs"]
    rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"]
    # rtgs = returns[traj_idxs]
    rollout_transitions["rtgs"] = rtgs[..., None]  # (N,1)

    return rollout_transitions, \
        {"num_transitions": num_transitions,
         "reward_mean": rewards_arr.mean(),
         "returns": returns,
         "max_rewards": max_rewards,
         "rewards_full": rewards_full}


def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pickplace':
        env = roboverse.make('Widow250PickTray-v0')
        env = PickPlaceObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
        task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")
        diff_dataset, _ = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledraweropen':
        env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawercloseopen':
        env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawerpickplaceopen':
        env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)
        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")
        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    else:
        raise NotImplementedError

    env.reset(seed=args.seed)

    timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S")
    exp_name = f"timestamp_{timestamp}&{args.seed}"
    log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="dynamics")
    # key: output file name, value: output handler type
    print(f"Logging dynamics to {log_dirs}")
    output_config = {
        "consoleout_backup": "stdout",
        "policy_training_progress": "csv",
        "dynamics_training_progress": "csv",
        "tb": "tensorboard"
    }
logger = Logger(log_dirs, output_config)
7
2023-10-11 08:36:06+00:00
24k
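The rollout code in the entry above derives per-transition reward-to-go values as rtgs = returns[traj_idxs] - acc_rets, where acc_rets is the return accumulated before each step, so each rtg equals the sum of rewards from that step to the end of the trajectory. Below is a minimal NumPy-only sketch of that bookkeeping on toy data; the array names mirror the snippet, but the rewards and shapes are invented for illustration and this is not the original implementation.

# Minimal sketch of the reward-to-go bookkeeping used by the rollout above.
import numpy as np

rewards_per_step = np.array([[1.0, 0.0, 2.0],
                             [0.5, 0.5, 0.5]])   # (batch=2, rollout_length=3), toy values
batch, horizon = rewards_per_step.shape

returns = np.zeros(batch)        # final return of each trajectory
acc_returns = np.zeros(batch)    # return accumulated so far
acc_rets_log, traj_idxs_log = [], []

for t in range(horizon):
    traj_idxs_log.append(np.arange(batch))   # which trajectory each transition belongs to
    acc_rets_log.append(acc_returns.copy())  # return accumulated *before* taking step t
    returns = returns + rewards_per_step[:, t]
    acc_returns = acc_returns + rewards_per_step[:, t]

traj_idxs = np.concatenate(traj_idxs_log)    # (N,) with N = batch * horizon
acc_rets = np.concatenate(acc_rets_log)      # (N,)
rtgs = returns[traj_idxs] - acc_rets         # reward-to-go per transition, as in the snippet
print(rtgs.reshape(horizon, batch).T)        # row 0 -> [3. 2. 2.], row 1 -> [1.5 1. 0.5]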
lmb-freiburg/ldce
ldm/models/diffusion/dpm_solver/sampler.py
[ { "identifier": "NoiseScheduleVP", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise\n schedule are the default settings in DDPM and improved-DDPM:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n cosine_s: A `float` number. The hyperparameter in the cosine schedule.\n cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 
'discrete' for discrete-time DPMs,\n 'linear' or 'cosine' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n \n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in ['discrete', 'linear', 'cosine']:\n raise ValueError(\"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'\".format(schedule))\n\n self.schedule = schedule\n if schedule == 'discrete':\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.total_N = len(log_alphas)\n self.T = 1.\n self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))\n self.log_alpha_array = log_alphas.reshape((1, -1,))\n else:\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n self.cosine_s = 0.008\n self.cosine_beta_max = 999.\n self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s\n self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))\n self.schedule = schedule\n if schedule == 'cosine':\n # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.\n # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.\n self.T = 0.9946\n else:\n self.T = 1.\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == 'discrete':\n return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))\n elif self.schedule == 'linear':\n return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n elif self.schedule == 'cosine':\n log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))\n log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0\n return log_alpha_t\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == 'linear':\n tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. 
* lamb, torch.zeros((1,)).to(lamb))\n Delta = self.beta_0**2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == 'discrete':\n log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)\n t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))\n return t.reshape((-1,))\n else:\n log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))\n t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s\n t = t_fn(log_alpha)\n return t" }, { "identifier": "model_wrapper", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. \"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n \n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n `` \n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n `` \n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. 
\"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n \n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs) \n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == 'discrete':\n return (t_continuous - 1. / noise_schedule.total_N) * 1000.\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n if t_continuous.reshape((-1,)).shape[0] == 1:\n t_continuous = t_continuous.expand((x.shape[0]))\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(x, t_input, **model_kwargs)\n else:\n output = model(x, t_input, cond, **model_kwargs)\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return -expand_dims(sigma_t, dims) * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. 
nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if t_continuous.reshape((-1,)).shape[0] == 1:\n t_continuous = t_continuous.expand((x.shape[0]))\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1. or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "DPM_Solver", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):\n \"\"\"Construct a DPM-Solver. \n\n We support both the noise prediction model (\"predicting epsilon\") and the data prediction model (\"predicting x0\").\n If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).\n If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).\n In such case, we further support the \"dynamic thresholding\" in [1] when `thresholding` is True.\n The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.\n thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the \"dynamic thresholding\" in [1].\n max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.\n \n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = model_fn\n self.noise_schedule = noise_schedule\n self.predict_x0 = predict_x0\n self.thresholding = thresholding\n self.max_val = max_val\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with thresholding).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n dims = x.dim()\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)\n if self.thresholding:\n p = 0.995 # A hyperparameter in the paper of \"Imagen\" [1].\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model. \n \"\"\"\n if self.predict_x0:\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == 'logSNR':\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == 'time_uniform':\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == 'time_quadratic':\n t_order = 2\n t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)\n return t\n else:\n raise ValueError(\"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(skip_type))\n\n def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). 
We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3,] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3,] * (K - 1) + [1]\n else:\n orders = [3,] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2,] * K\n else:\n K = steps // 2 + 1\n orders = [2,] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1,] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. \n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n expand_dims(sigma_s1 / sigma_s, dims) * x\n - expand_dims(alpha_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) 
/ h + 1.), dims) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x\n - expand_dims(sigma_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 1. / 3.\n if r2 is None:\n r2 = 2. 
/ 3.\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)\n alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n expand_dims(sigma_s1 / sigma_s, dims) * x\n - expand_dims(alpha_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n expand_dims(sigma_s2 / sigma_s, dims) * x\n - expand_dims(alpha_s2 * phi_12, dims) * model_s\n + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + expand_dims(alpha_t * phi_2, dims) * D1\n - expand_dims(alpha_t * phi_3, dims) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x\n - expand_dims(sigma_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x\n - expand_dims(sigma_s2 * phi_12, dims) * model_s\n - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - expand_dims(sigma_t * phi_2, dims) * D1\n - expand_dims(sigma_t * phi_3, dims) * D2\n )\n\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpm_solver\"):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_1, model_prev_0 = model_prev_list\n t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n if self.predict_x0:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0\n )\n else:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)\n D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)\n if self.predict_x0:\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1\n - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2\n )\n else:\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1\n - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. 
The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])\n elif order == 2:\n return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n elif order == 3:\n return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the \n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((x.shape[0],)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)\n elif order == 3:\n r1, r2 = 1. / 3., 2. 
/ 3.\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)\n else:\n raise ValueError(\"For adaptive step size solver, order must be 2 or 3, got {}\".format(order))\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))\n norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)\n nfe += order\n print('adaptive solver nfe', nfe)\n return x\n\n def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',\n method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',\n atol=0.0078, rtol=0.05,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. \"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver. \n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. 
DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. \"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. 
Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n device = x.device\n if method == 'adaptive':\n with torch.no_grad():\n x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)\n elif method == 'multistep':\n assert steps >= order\n timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)\n assert timesteps.shape[0] - 1 == steps\n with torch.no_grad():\n vec_t = timesteps[0].expand((x.shape[0]))\n model_prev_list = [self.model_fn(x, vec_t)]\n t_prev_list = [vec_t]\n # Init the first `order` values by lower order multistep DPM-Solver.\n for init_order in range(1, order):\n vec_t = timesteps[init_order].expand(x.shape[0])\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type)\n model_prev_list.append(self.model_fn(x, vec_t))\n t_prev_list.append(vec_t)\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in range(order, steps + 1):\n vec_t = timesteps[step].expand(x.shape[0])\n if lower_order_final and steps < 15:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = model_prev_list[i + 1]\n t_prev_list[-1] = vec_t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, vec_t)\n elif method in ['singlestep', 'singlestep_fixed']:\n if method == 'singlestep':\n timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)\n elif method == 'singlestep_fixed':\n K = steps // order\n orders = [order,] * K\n timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)\n for i, order in enumerate(orders):\n t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]\n timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device)\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)\n if denoise_to_zero:\n x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)\n 
return x" } ]
import torch

from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
17,811
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) model_fn = model_wrapper( lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type="noise", guidance_type="classifier-free", condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, )
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) model_fn = model_wrapper( lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type="noise", guidance_type="classifier-free", condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, )
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
2
2023-10-10 09:40:10+00:00
24k
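For reference, here is a hedged end-to-end sketch of how the three context snippets in this entry (NoiseScheduleVP, model_wrapper, DPM_Solver) are typically wired together, continuing past the next_line shown above: build a discrete noise schedule, wrap the denoiser with classifier-free guidance, then run DPM-Solver++ (predict_x0=True) in multistep mode, the setting the docstrings recommend for large guidance scales. The dummy model, betas, and tensor shapes below are placeholders, not the actual LDM checkpoint.

# Sketch only: assumes the lmb-freiburg/ldce repo is importable; the denoiser is a stand-in.
import torch
from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def dummy_model(x, t, c):
    # stands in for self.model.apply_model(x, t, c); must return predicted noise
    return torch.zeros_like(x)

ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
model_fn = model_wrapper(
    dummy_model, ns,
    model_type="noise",
    guidance_type="classifier-free",
    condition=torch.randn(1, 77, 768),            # placeholder conditioning
    unconditional_condition=torch.zeros(1, 77, 768),
    guidance_scale=7.5,
)
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
x_T = torch.randn(1, 4, 32, 32)                   # placeholder latent shape
x_0 = dpm_solver.sample(x_T, steps=20, order=2, skip_type='time_uniform',
                        method='multistep', lower_order_final=True)
print(x_0.shape)                                  # torch.Size([1, 4, 32, 32])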
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n\n # # check if a file named 'poses_global_dvo.txt' exists in the basedir / sequence folder\n # if os.path.isfile(os.path.join(basedir, sequence, \"poses_global_dvo.txt\")):\n # self.pose_path = os.path.join(basedir, sequence, \"poses_global_dvo.txt\")\n\n if \"odomfile\" in kwargs.keys():\n self.pose_path = os.path.join(self.input_folder, kwargs[\"odomfile\"])\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n if self.pose_path is None:\n print(\"WARNING: Dataset does not contain poses. Returning identity transform.\")\n return [torch.eye(4).float() for _ in range(self.num_imgs)]\n else:\n # Determine whether the posefile ends in \".log\"\n # a .log file has the following format for each frame\n # frame_idx frame_idx+1\n # row 1 of 4x4 transform\n # row 2 of 4x4 transform\n # row 3 of 4x4 transform\n # row 4 of 4x4 transform\n # [repeat for all frames]\n #\n # on the other hand, the \"poses_o3d.txt\" or \"poses_dvo.txt\" files have the format\n # 16 entries of 4x4 transform\n # [repeat for all frames]\n if self.pose_path.endswith(\".log\"):\n # print(\"Loading poses from .log format\")\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n if len(lines) % 5 != 0:\n raise ValueError(\n \"Incorrect file format for .log odom file \" \"Number of non-empty lines must be a multiple of 5\"\n )\n num_lines = len(lines) // 5\n for i in range(0, num_lines):\n _curpose = []\n _curpose.append(list(map(float, lines[5 * i + 1].split())))\n _curpose.append(list(map(float, lines[5 * i + 2].split())))\n _curpose.append(list(map(float, lines[5 * i + 3].split())))\n _curpose.append(list(map(float, lines[5 * i + 4].split())))\n _curpose = np.array(_curpose).reshape(4, 4)\n poses.append(torch.from_numpy(_curpose))\n else:\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n if len(line.split()) == 0:\n continue\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n poses.append(torch.from_numpy(c2w))\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding # .permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "load_dataset_config", "path": "datasets/gradslam_datasets/dataconfig.py", "snippet": "def 
load_dataset_config(path, default_path=None):\n \"\"\"\n Loads config file.\n\n Args:\n path (str): path to config file.\n default_path (str, optional): whether to use default path. Defaults to None.\n\n Returns:\n cfg (dict): config dict.\n\n \"\"\"\n # load configuration from file itself\n with open(path, \"r\") as f:\n cfg_special = yaml.full_load(f)\n\n # check if we should inherit from a config\n inherit_from = cfg_special.get(\"inherit_from\")\n\n # if yes, load this config first as default\n # if no, use the default_path\n if inherit_from is not None:\n cfg = load_dataset_config(inherit_from, default_path)\n elif default_path is not None:\n with open(default_path, \"r\") as f:\n cfg = yaml.full_load(f)\n else:\n cfg = dict()\n\n # include main configuration\n update_recursive(cfg, cfg_special)\n\n return cfg" }, { "identifier": "ICLDataset", "path": "datasets/gradslam_datasets/icl.py", "snippet": "class ICLDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict: Dict,\n basedir: Union[Path, str],\n sequence: Union[Path, str],\n stride: Optional[int] = 1,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[Union[Path, str]] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n embedding_file_extension: Optional[str] = \"pt\",\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # Attempt to find pose file (*.gt.sim)\n self.pose_path = glob.glob(os.path.join(self.input_folder, \"*.gt.sim\"))\n if self.pose_path == 0:\n raise ValueError(\"Need pose file ending in extension `*.gt.sim`\")\n self.pose_path = self.pose_path[0]\n self.embedding_file_extension = embedding_file_extension\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/rgb/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(\n glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.{self.embedding_file_extension}\")\n )\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n\n lines = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n\n _posearr = []\n for line in lines:\n line = line.strip().split()\n if len(line) == 0:\n continue\n _npvec = np.asarray([float(line[0]), float(line[1]), float(line[2]), float(line[3])])\n _posearr.append(_npvec)\n _posearr = np.stack(_posearr)\n\n for pose_line_idx in range(0, _posearr.shape[0], 3):\n _curpose = np.zeros((4, 4))\n _curpose[3, 3] = 3\n _curpose[0] = _posearr[pose_line_idx]\n _curpose[1] = _posearr[pose_line_idx + 1]\n _curpose[2] = _posearr[pose_line_idx + 2]\n poses.append(torch.from_numpy(_curpose).float())\n\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ReplicaDataset", "path": "datasets/gradslam_datasets/replica.py", "snippet": "class ReplicaDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] 
= 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"traj.txt\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/results/frame*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/results/depth*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for i in range(self.num_imgs):\n line = lines[i]\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n # c2w[:3, 1] *= -1\n # c2w[:3, 2] *= -1\n c2w = torch.from_numpy(c2w).float()\n poses.append(c2w)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ScannetDataset", "path": "datasets/gradslam_datasets/scannet.py", "snippet": "class ScannetDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Ai2thorDataset", "path": "datasets/gradslam_datasets/ai2thor.py", "snippet": "class Ai2thorDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 
1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n if self.embedding_dir == \"embed_semseg\":\n # embed_semseg is stored as uint16 pngs\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.png\"))\n else:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n if self.embedding_dir == \"embed_semseg\":\n embedding = imageio.imread(embedding_file_path) # (H, W)\n embedding = cv2.resize(\n embedding, (self.desired_width, self.desired_height), interpolation=cv2.INTER_NEAREST\n )\n embedding = torch.from_numpy(embedding).long() # (H, W)\n embedding = F.one_hot(embedding, num_classes=self.embedding_dim) # (H, W, C)\n embedding = embedding.half() # (H, W, C)\n embedding = embedding.permute(2, 0, 1) # (C, H, W)\n embedding = embedding.unsqueeze(0) # (1, C, H, W)\n else:\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "RealsenseDataset", "path": "datasets/gradslam_datasets/realsense.py", "snippet": "class RealsenseDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to process depth images captured by realsense camera on the tabletop manipulator\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # only poses/images/depth corresponding to the realsense_camera_order are read/used\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.jpg\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, 
\"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Record3DDataset", "path": "datasets/gradslam_datasets/record3d.py", "snippet": "class Record3DDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to read in saved files from the structure created by our\n `save_record3d_stream.py` script\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.png\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, \"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "TUMDataset", "path": "datasets/gradslam_datasets/tum.py", "snippet": "class TUMDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def parse_list(self, filepath, skiprows=0):\n \"\"\" read list data \"\"\"\n data = np.loadtxt(filepath, delimiter=' ',\n dtype=np.unicode_, skiprows=skiprows)\n return data\n\n def associate_frames(self, tstamp_image, tstamp_depth, tstamp_pose, max_dt=0.08):\n \"\"\" pair 
images, depths, and poses \"\"\"\n associations = []\n for i, t in enumerate(tstamp_image):\n if tstamp_pose is None:\n j = np.argmin(np.abs(tstamp_depth - t))\n if (np.abs(tstamp_depth[j] - t) < max_dt):\n associations.append((i, j))\n\n else:\n j = np.argmin(np.abs(tstamp_depth - t))\n k = np.argmin(np.abs(tstamp_pose - t))\n\n if (np.abs(tstamp_depth[j] - t) < max_dt) and \\\n (np.abs(tstamp_pose[k] - t) < max_dt):\n associations.append((i, j, k))\n\n return associations\n\n def pose_matrix_from_quaternion(self, pvec):\n \"\"\" convert 4x4 pose matrix to (t, q) \"\"\"\n from scipy.spatial.transform import Rotation\n\n pose = np.eye(4)\n pose[:3, :3] = Rotation.from_quat(pvec[3:]).as_matrix()\n pose[:3, 3] = pvec[:3]\n return pose\n\n def get_filepaths(self):\n\n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, depth_paths = [], []\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, depth_data[j, 1])]\n\n embedding_paths = None\n\n return color_paths, depth_paths, embedding_paths\n \n def load_poses(self):\n \n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, poses, depth_paths, intrinsics = [], [], [], []\n inv_pose = None\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, 
depth_data[j, 1])]\n c2w = self.pose_matrix_from_quaternion(pose_vecs[k])\n c2w = torch.from_numpy(c2w).float()\n poses += [c2w]\n\n return poses\n \n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1)" }, { "identifier": "ScannetPPDataset", "path": "datasets/gradslam_datasets/scannetpp.py", "snippet": "class ScannetPPDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n ignore_bad: Optional[bool] = False,\n use_train_split: Optional[bool] = True,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1168,\n desired_width: Optional[int] = 1752,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"scannetpp\"\n self.pose_path = None\n self.ignore_bad = ignore_bad\n self.use_train_split = use_train_split\n\n # Load Train & Test Split\n self.train_test_split = json.load(open(f\"{self.input_folder}/dslr/train_test_lists.json\", \"r\"))\n if self.use_train_split:\n self.image_names = self.train_test_split[\"train\"]\n else:\n self.image_names = self.train_test_split[\"test\"]\n self.train_image_names = self.train_test_split[\"train\"]\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n if self.use_train_split:\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n else:\n self.frames_metadata = self.cams_metadata[\"test_frames\"]\n self.train_frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n self.train_filepath_index_mapping = create_filepath_index_mapping(self.train_frames_metadata) \n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 1000.0 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/dslr/nerfstudio/transforms_undistorted.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}/dslr\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n if not self.use_train_split:\n self.first_train_image_name = self.train_image_names[0]\n self.first_train_image_index = self.train_filepath_index_mapping.get(self.first_train_image_name)\n self.first_train_frame_metadata = 
self.train_frames_metadata[self.first_train_image_index]\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{self.first_train_image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{self.first_train_image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of first train frame in GradSLAM format\n c2w = torch.from_numpy(np.array(self.first_train_frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Check if frame is blurry and if it needs to be ignored\n if self.ignore_bad and frame_metadata['is_bad']:\n continue\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of undistorted image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "NeRFCaptureDataset", "path": "datasets/gradslam_datasets/nerfcapture.py", "snippet": "class NeRFCaptureDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1440,\n desired_width: Optional[int] = 1920,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"nerfcapture\"\n self.pose_path = None\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n\n # Load RGB & Depth filepaths\n self.image_names = natsorted(os.listdir(f\"{self.input_folder}/rgb\"))\n self.image_names = [f'rgb/{image_name}' for image_name in self.image_names]\n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 6553.5 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n 
desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/transforms.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Get path of image and depth\n color_path = f\"{base_path}/{image_name}\"\n depth_path = f\"{base_path}/{image_name.replace('rgb', 'depth')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_seq_params", "path": "utils/common_utils.py", "snippet": "def save_seq_params(all_params, output_dir):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": 
"save_seq_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_seq_params_ckpt(all_params, output_dir,time_idx):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "params2rendervar", "path": "utils/gs_helpers.py", "snippet": "def params2rendervar(params):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': params['rgb_colors'],\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def params2depthplussilhouette(params, w2c):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': get_depth_and_silhouette(params['means3D'], w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transformed_params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def transformed_params2depthplussilhouette(params, w2c, transformed_pts):\n rendervar = {\n 'means3D': transformed_pts,\n 'colors_precomp': get_depth_and_silhouette(transformed_pts, w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transform_to_frame", "path": "utils/gs_helpers.py", "snippet": "def transform_to_frame(params, time_idx, gaussians_grad, camera_grad):\n \"\"\"\n Function to transform Isotropic Gaussians from world frame to camera frame.\n \n Args:\n params: dict of parameters\n time_idx: time index to transform to\n gaussians_grad: enable gradients for Gaussians\n camera_grad: enable gradients for camera pose\n \n Returns:\n transformed_pts: Transformed Centers of Gaussians\n \"\"\"\n # Get 
Frame Camera Pose\n if camera_grad:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx])\n cam_tran = params['cam_trans'][..., time_idx]\n else:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n cam_tran = params['cam_trans'][..., time_idx].detach()\n rel_w2c = torch.eye(4).cuda().float()\n rel_w2c[:3, :3] = build_rotation(cam_rot)\n rel_w2c[:3, 3] = cam_tran\n\n # Get Centers and norm Rots of Gaussians in World Frame\n if gaussians_grad:\n pts = params['means3D']\n else:\n pts = params['means3D'].detach()\n \n # Transform Centers and Unnorm Rots of Gaussians to Camera Frame\n pts_ones = torch.ones(pts.shape[0], 1).cuda().float()\n pts4 = torch.cat((pts, pts_ones), dim=1)\n transformed_pts = (rel_w2c @ pts4.T).T[:, :3]\n\n return transformed_pts" }, { "identifier": "report_progress", "path": "utils/gs_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n\n # Initialize Render Variables\n rendervar = params2rendervar(params)\n depth_sil_rendervar = params2depthplussilhouette(params, data['w2c'])\n\n # Initialize Render Variables\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n\n if not mapping:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n else:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_run.log({f\"{stage} PSNR\": psnr, f\"{stage} RMSE\": rmse}, step=wandb_step)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "eval", "path": "utils/gs_helpers.py", "snippet": "def eval(dataset, final_params, num_frames, eval_dir, sil_thres, mapping_iters, add_new_gaussians, 
wandb_run=None, wandb_save_qual=False):\n print(\"Evaluating Final Parameters ...\")\n psnr_list = []\n rmse_list = []\n lpips_list = []\n ssim_list = []\n plot_dir = os.path.join(eval_dir, \"plots\")\n os.makedirs(plot_dir, exist_ok=True)\n\n gt_w2c_list = []\n for time_idx in tqdm(range(num_frames)):\n # Get RGB-D Data & Camera Parameters\n color, depth, intrinsics, pose = dataset[time_idx]\n gt_w2c = torch.linalg.inv(pose)\n gt_w2c_list.append(gt_w2c)\n intrinsics = intrinsics[:3, :3]\n\n # Process RGB-D Data\n color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W)\n depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W)\n\n # Process Camera Parameters\n w2c = torch.linalg.inv(pose)\n if time_idx == 0:\n first_frame_w2c = w2c\n # Setup Camera\n cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy())\n \n # Define current frame data\n curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': time_idx, 'intrinsics': intrinsics, 'w2c': w2c}\n\n # Initialize Render Variables\n rendervar = params2rendervar(final_params)\n depth_sil_rendervar = params2depthplussilhouette(final_params, w2c)\n\n # Render Depth & Silhouette\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (curr_data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n \n # Render RGB and Calculate PSNR\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n if mapping_iters==0 and not add_new_gaussians:\n weighted_im = im * presence_sil_mask\n weighted_gt_im = curr_data['im'] * presence_sil_mask\n psnr = calc_psnr(weighted_im, weighted_gt_im).mean()\n ssim = ms_ssim(weighted_im.unsqueeze(0).cpu(), weighted_gt_im.unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(weighted_im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(weighted_gt_im.unsqueeze(0), 0.0, 1.0)).item()\n else:\n psnr = calc_psnr(im, curr_data['im']).mean()\n ssim = ms_ssim(im.unsqueeze(0).cpu(), curr_data['im'].unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(curr_data['im'].unsqueeze(0), 0.0, 1.0)).item()\n\n psnr_list.append(psnr.cpu().numpy())\n ssim_list.append(ssim.cpu().numpy())\n lpips_list.append(lpips_score)\n\n # Compute Depth RMSE\n if mapping_iters==0 and not add_new_gaussians:\n diff_depth_rmse = torch.sqrt((((rastered_depth - curr_data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - curr_data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n rmse_list.append(rmse.cpu().numpy())\n\n # Plot the Ground Truth and Rasterized RGB & Depth, along with Silhouette\n fig_title = \"Time Step: {}\".format(time_idx)\n plot_name = \"%04d\" % time_idx\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n if wandb_run is None:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, save_plot=True)\n elif wandb_save_qual:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, 
save_plot=True,\n wandb_run=wandb_run, wandb_step=None, \n wandb_title=\"Eval Qual Viz\")\n\n # Compute Average Metrics\n psnr_list = np.array(psnr_list)\n rmse_list = np.array(rmse_list)\n ssim_list = np.array(ssim_list)\n lpips_list = np.array(lpips_list)\n avg_psnr = psnr_list.mean()\n avg_rmse = rmse_list.mean()\n avg_ssim = ssim_list.mean()\n avg_lpips = lpips_list.mean()\n print(\"Average PSNR: {:.2f}\".format(avg_psnr))\n print(\"Average Depth RMSE: {:.2f}\".format(avg_rmse))\n print(\"Average MS-SSIM: {:.2f}\".format(avg_ssim))\n print(\"Average LPIPS: {:.2f}\".format(avg_lpips))\n\n if wandb_run is not None:\n wandb_run.log({\"Average PSNR\": avg_psnr, \"Average Depth RMSE\": avg_rmse, \"Average MS-SSIM\": avg_ssim, \"Average LPIPS\": avg_lpips})\n\n # # Save metric lists as text files\n # np.savetxt(os.path.join(eval_dir, \"psnr.txt\"), psnr_list)\n # np.savetxt(os.path.join(eval_dir, \"rmse.txt\"), rmse_list)\n # np.savetxt(os.path.join(eval_dir, \"ssim.txt\"), ssim_list)\n # np.savetxt(os.path.join(eval_dir, \"lpips.txt\"), lpips_list)\n\n # # Plot PSNR & RMSE as line plots\n # fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n # axs[0].plot(np.arange(num_frames), psnr_list)\n # axs[0].set_title(\"RGB PSNR\")\n # axs[0].set_xlabel(\"Time Step\")\n # axs[0].set_ylabel(\"PSNR\")\n # axs[1].plot(np.arange(num_frames), rmse_list)\n # axs[1].set_title(\"Depth RMSE\")\n # axs[1].set_xlabel(\"Time Step\")\n # axs[1].set_ylabel(\"RMSE\")\n # fig.suptitle(\"Average PSNR: {:.2f}, Average Depth RMSE: {:.2f}\".format(avg_psnr, avg_rmse), y=1.05, fontsize=16)\n # plt.savefig(os.path.join(eval_dir, \"metrics.png\"), bbox_inches='tight')\n # if wandb_run is not None:\n # wandb_run.log({\"Eval Metrics\": fig})\n # plt.close()" }, { "identifier": "l1_loss_v1", "path": "utils/gs_helpers.py", "snippet": "def l1_loss_v1(x, y):\n return torch.abs((x - y)).mean()" }, { "identifier": "matrix_to_quaternion", "path": "utils/gs_helpers.py", "snippet": "def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert rotations given as rotation matrices to quaternions.\n\n Args:\n matrix: Rotation matrices as tensor of shape (..., 3, 3).\n\n Returns:\n quaternions with real part first, as tensor of shape (..., 4).\n Source: https://pytorch3d.readthedocs.io/en/latest/_modules/pytorch3d/transforms/rotation_conversions.html#matrix_to_quaternion\n \"\"\"\n if matrix.size(-1) != 3 or matrix.size(-2) != 3:\n raise ValueError(f\"Invalid rotation matrix shape {matrix.shape}.\")\n\n batch_dim = matrix.shape[:-2]\n m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(\n matrix.reshape(batch_dim + (9,)), dim=-1\n )\n\n q_abs = _sqrt_positive_part(\n torch.stack(\n [\n 1.0 + m00 + m11 + m22,\n 1.0 + m00 - m11 - m22,\n 1.0 - m00 + m11 - m22,\n 1.0 - m00 - m11 + m22,\n ],\n dim=-1,\n )\n )\n\n # we produce the desired quaternion multiplied by each of r, i, j, k\n quat_by_rijk = torch.stack(\n [\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m10 - m01, m20 + m02, m21 + 
m12, q_abs[..., 3] ** 2], dim=-1),\n ],\n dim=-2,\n )\n\n # We floor here at 0.1 but the exact level is not important; if q_abs is small,\n # the candidate won't be picked.\n flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)\n quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))\n\n # if not for numerical problems, quat_candidates[i] should be same (up to a sign),\n # forall i; we pick the best-conditioned one (with the largest denominator)\n\n return quat_candidates[\n F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :\n ].reshape(batch_dim + (4,))" }, { "identifier": "calc_ssim", "path": "utils/gs_external.py", "snippet": "def calc_ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "build_rotation", "path": "utils/gs_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "densify", "path": "utils/gs_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_clone] \n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n #track new variables for new formed points\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_split].repeat(n)\n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), 
device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n \n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_expon_lr_func", "path": "utils/gs_external.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "update_learning_rate", "path": "utils/gs_external.py", "snippet": "def update_learning_rate(optimizer, means3D_scheduler, iteration):\n ''' Learning rate scheduling per step '''\n for param_group in optimizer.param_groups:\n if param_group[\"name\"] == \"means3D\":\n lr = means3D_scheduler(iteration)\n param_group['lr'] = lr\n return lr" } ]
import argparse
import os
import random
import sys
import shutil
import cv2
import numpy as np
import torch
import wandb
from importlib.machinery import SourceFileLoader
from tqdm import tqdm
from datasets.gradslam_datasets import (
    load_dataset_config,
    ICLDataset,
    ReplicaDataset,
    AzureKinectDataset,
    ScannetDataset,
    Ai2thorDataset,
    Record3DDataset,
    RealsenseDataset,
    TUMDataset,
    ScannetPPDataset,
    NeRFCaptureDataset
)
from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt
from utils.recon_helpers import setup_camera
from utils.gs_helpers import (
    params2rendervar,
    params2depthplussilhouette,
    transformed_params2depthplussilhouette,
    transform_to_frame,
    report_progress,
    eval,
    l1_loss_v1,
    matrix_to_quaternion
)
from utils.gs_external import (
    calc_ssim,
    build_rotation,
    densify,
    get_expon_lr_func,
    update_learning_rate
)
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
17,180
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _BASE_DIR)

print("System Paths:")
for p in sys.path:
    print(p)


def get_dataset(config_dict, basedir, sequence, **kwargs):
    if config_dict["dataset_name"].lower() in ["icl"]:
        return ICLDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["replica"]:
        return ReplicaDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]:
        return AzureKinectDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannet"]:
        return ScannetDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["ai2thor"]:
        return Ai2thorDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["record3d"]:
        return Record3DDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["realsense"]:
        return RealsenseDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["tum"]:
        return TUMDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannetpp"]:
        return ScannetPPDataset(basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["nerfcapture"]:
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _BASE_DIR)

print("System Paths:")
for p in sys.path:
    print(p)


def get_dataset(config_dict, basedir, sequence, **kwargs):
    if config_dict["dataset_name"].lower() in ["icl"]:
        return ICLDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["replica"]:
        return ReplicaDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]:
        return AzureKinectDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannet"]:
        return ScannetDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["ai2thor"]:
        return Ai2thorDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["record3d"]:
        return Record3DDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["realsense"]:
        return RealsenseDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["tum"]:
        return TUMDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannetpp"]:
        return ScannetPPDataset(basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["nerfcapture"]:
return NeRFCaptureDataset(basedir, sequence, **kwargs)
10
2023-11-30 20:26:47+00:00
24k
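The row above shows how the fields fit together: the truncated code ends right before the line to be predicted, next_line supplies that missing return (completing the final nerfcapture branch of get_dataset), and gold_snippet_index 10 lines up with the NeRFCaptureDataset entry in the context list (0-based), suggesting it marks the cross-file snippet the gold line depends on. Below is a minimal sketch, not part of the dataset, of how such a row could be turned into a repository-level next-line completion example; build_prompt, exact_match, and examples.jsonl are hypothetical names introduced only for illustration.

import json


def build_prompt(row: dict, max_context_snippets: int = 5) -> str:
    """Concatenate a few cross-file context snippets with the in-file prefix."""
    context_blocks = []
    for entry in row["context"][:max_context_snippets]:
        header = f"# Path: {entry['path']} ({entry['identifier']})"
        context_blocks.append(header + "\n" + entry["snippet"])
    # The model is asked to continue cropped_code; the reference answer is next_line.
    return "\n\n".join(context_blocks) + "\n\n" + row["cropped_code"] + "\n"


def exact_match(prediction: str, reference: str) -> bool:
    """Whitespace-insensitive comparison against the gold next line."""
    return prediction.strip() == reference.strip()


if __name__ == "__main__":
    # examples.jsonl is a hypothetical file holding rows shaped like the dump above.
    with open("examples.jsonl") as f:
        row = json.loads(f.readline())
    prompt = build_prompt(row)
    gold = row["next_line"]
    # gold_snippet_index appears to name the context entry the gold line relies on.
    needed = row["context"][row["gold_snippet_index"]]["identifier"]
    print(f"Gold next line depends on context snippet: {needed}")
    model_output = "return NeRFCaptureDataset(basedir, sequence, **kwargs)"  # stand-in for a model prediction
    print(exact_match(model_output, gold))

How many context snippets to keep and which matching metric to use would depend on the evaluation protocol; the sketch only makes the relationship between the fields concrete.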
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None):\n input = extract_key(input, KEY_OUTPUT)\n \n if mask is not None:\n input_filtered = input[mask]\n target_filtered = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input_filtered + alpha) - torch.log(target_filtered + alpha)\n Dg = torch.var(g) + self.beta * torch.pow(torch.mean(g), 2)\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n return loss" }, { "identifier": "DistributionLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class DistributionLoss(nn.Module):\n def __init__(self, max_depth):\n super(DistributionLoss, self).__init__()\n self.name = 'DistributionLoss'\n self.max_depth = max_depth\n\n def forward(self, input, target, mask=None, dist='biLaplacian'):\n \n \n mu0 = input['mu0']\n mu1 = input['mu1']\n sigma0 = input['sigma0']\n sigma1 = input['sigma1']\n pi0 = input['pi0']\n pi1 = input['pi1']\n \n pred_mask = (pi0 / sigma0 > pi1 / sigma1).float()\n pred_depth = (mu0 * pred_mask + mu1 * (1. - pred_mask))\n pred_metric_depth = (1 - pred_depth) * self.max_depth\n\n\n if mask is not None:\n mu0 = mu0[mask]\n mu1 = mu1[mask]\n sigma0 = sigma0[mask]\n sigma1 = sigma1[mask]\n pi0 = pi0[mask]\n pi1 = pi1[mask]\n\n # real_input = real_depth[mask]\n \n real_input = mu0\n pred_metric_depth = pred_metric_depth[mask]\n record_target = target[mask]\n\n\n target_filtered = 1 - target[mask] / self.max_depth\n bi_loss = bimodal_loss(mu0, mu1, sigma0, sigma1, pi0, pi1, target_filtered, dist=dist).mean()\n # print(bi_loss) \n\n alpha = 1e-7\n beta = 0.15\n g = torch.log(real_input + alpha) - torch.log(record_target + alpha)\n Dg = torch.var(g) + beta * torch.pow(torch.mean(g), 2)\n sig_loss = 10 * torch.sqrt(Dg)\n # print(sig_loss)\n \n return bi_loss, sig_loss" }, { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):\n hack_input = input\n\n input = extract_key(input, KEY_OUTPUT)\n if input.shape[-1] != target.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, target.shape[-2:], mode='bilinear', align_corners=True)\n intr_input = input\n else:\n intr_input = input\n\n if target.ndim == 3:\n target = target.unsqueeze(1)\n\n if mask is not None:\n if mask.ndim == 3:\n mask = mask.unsqueeze(1)\n\n input = input[mask]\n target = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input + alpha) - torch.log(target + alpha)\n\n # n, c, h, w = g.shape\n # norm = 1/(h*w)\n # Dg = norm * torch.sum(g**2) - (0.85/(norm**2)) * (torch.sum(g))**2\n\n Dg = torch.var(g) + self.beta * 
torch.pow(torch.mean(g), 2)\n\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n if input.numel() == 0:\n loss = torch.mean(hack_input) * 0\n if not return_interpolated:\n return loss\n return loss, intr_input\n \n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n if not return_interpolated:\n return loss\n\n return loss, intr_input" }, { "identifier": "BudgetConstraint", "path": "zoedepth/trainers/loss.py", "snippet": "class BudgetConstraint(nn.Module):\n \"\"\"\n Given budget constraint to reduce expected inference FLOPs in the Dynamic Network.\n \"\"\"\n def __init__(self, loss_mu, flops_all, warm_up=True):\n super().__init__()\n self.loss_mu = loss_mu\n self.flops_all = flops_all\n self.warm_up = warm_up\n\n def forward(self, flops_expt, warm_up_rate=1.0):\n if self.warm_up:\n warm_up_rate = min(1.0, warm_up_rate)\n else:\n warm_up_rate = 1.0\n losses = warm_up_rate * ((flops_expt / self.flops_all - self.loss_mu)**2)\n return losses" }, { "identifier": "HistogramMatchingLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class HistogramMatchingLoss(nn.Module):\n def __init__(self, min_depth, max_depth, bins=512):\n super(HistogramMatchingLoss, self).__init__()\n self.name = 'HistogramMatchingLoss'\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.bins = bins\n\n def forward(self, input, target, mask, interpolate=True):\n if input.shape[-1] != mask.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, mask.shape[-2:], mode='bilinear', align_corners=True)\n \n if target.shape[-1] != mask.shape[-1] and interpolate:\n target = nn.functional.interpolate(\n target, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n input[~mask] = 0\n target[~mask] = 0\n\n\n pred_hist = torch.histc(input, bins=self.bins, min=self.min_depth, max=self.max_depth)\n gt_hist = torch.histc(target, bins=self.bins, min=self.min_depth, max=self.max_depth)\n\n pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # print(pred_hist.shape)\n # print(pred_hist)\n # _pred_hist = pred_hist.detach().cpu().numpy()\n # _gt_hist = gt_hist.detach().cpu().numpy()\n # plt.subplot(2, 1, 1)\n # plt.bar(range(len(_pred_hist)), _pred_hist)\n # plt.subplot(2, 1, 2)\n # plt.bar(range(len(_gt_hist)), _gt_hist)\n # plt.savefig('./debug_scale.png')\n\n # Compute cumulative histograms (CDF)\n cdf_pred = torch.cumsum(pred_hist, dim=0)\n cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # Compute Earth Mover's Distance (EMD) between the CDFs\n loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n # loss = torch.mean(torch.sqrt((pred_hist - gt_hist)**2))\n # loss = F.kl_div(torch.log(pred_hist + 1e-10), gt_hist, reduction='mean')\n \n return loss" }, { "identifier": "SSIM", "path": "zoedepth/trainers/loss.py", "snippet": "class SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2, mask, interpolate=True):\n if img1.shape[-1] != mask.shape[-1] and interpolate:\n img1 = nn.functional.interpolate(\n img1, mask.shape[-2:], mode='bilinear', 
align_corners=True)\n \n if img2.shape[-1] != mask.shape[-1] and interpolate:\n img2 = nn.functional.interpolate(\n img2, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n img1[~mask] = 0\n img2[~mask] = 0\n\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n self.window = window\n self.channel = channel\n\n\n loss = _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n return loss" }, { "identifier": "ConsistencyLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class ConsistencyLoss(nn.Module):\n def __init__(self, target, focus_flatten=False, wp=1) -> None:\n super().__init__()\n self.name = 'Consistency'\n self.target = target\n self.mode = 'no-resize'\n # self.mode = 'resize'\n self.focus_flatten = focus_flatten\n self.wp = wp\n\n def gradient_y(self, img):\n # gy = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gy = F.conv2d(img, torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gy\n\n def gradient_x(self, img):\n # gx = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gx = F.conv2d(img, torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gx\n\n def forward(self, depth_preds, shifts, mask, temp_features, pred_f=None):\n\n common_area_1_list = []\n common_area_2_list = []\n\n if self.focus_flatten:\n # only consider flatten place\n grad = kornia.filters.spatial_gradient(pred_f.detach())\n grad_x, grad_y = grad[:, :, 0, :, :], grad[:, :, 1, :, :]\n grad = torch.sqrt(grad_x ** 2 + grad_y ** 2)\n grad_ext = grad > 0.05 # over 5cm\n grad_ext = grad_ext.float()\n grad_blur = kornia.filters.gaussian_blur2d(grad_ext, (11, 11), (3, 3))\n grad_ext = grad_blur > 0 # over 5cm\n grad_ext = grad_blur == 0 \n mask = torch.logical_and(mask, grad_ext)\n\n\n if self.target == \"mix\":\n ## for feature\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(F.interpolate(mask.float(), (384, 512)).bool(), bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = 
int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n consistency_loss_feat = consistency_loss\n\n \n common_area_1_list = []\n common_area_2_list = []\n\n ## for pred\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, 
max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2) \n consistency_loss_pred = consistency_loss\n\n consistency_loss = consistency_loss_pred * self.wp + consistency_loss_feat\n return consistency_loss\n \n elif 'feat' in self.target:\n if self.mode == 'resize':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n feat_ori_list = []\n feat_shift_list = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n if idx < 4:\n continue\n \n split_feat = torch.split(feature, bs//2, dim=0)\n f = F.interpolate(split_feat[0], (h, w), mode='bilinear', align_corners=True)\n feat_ori_list.append(f)\n f = F.interpolate(split_feat[1], (h, w), mode='bilinear', align_corners=True)\n feat_shift_list.append(f)\n\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, split_mask[0], shifts)): # iter bs (paired feat)\n c, h, w = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = shift_bs[0], shift_bs[1]\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n # common_area_masked_1 = common_area_1.flatten()\n # common_area_masked_2 = common_area_2.flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n\n return consistency_loss\n \n\n else:\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n mask = F.interpolate(mask.float(), (384, 512)).bool() # back to 384, 512\n split_mask = torch.split(mask, bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, 
h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n return consistency_loss\n \n elif self.target == 'pred':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, 
abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n \n return consistency_loss\n \n else:\n raise NotImplementedError" }, { "identifier": "DATASETS_CONFIG", "path": "zoedepth/utils/config.py", "snippet": "DATASETS_CONFIG = {\n \"kitti\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216, # 704\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"kitti_test\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216,\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": False,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"nyu\": {\n \"dataset\": \"nyu\",\n \"avoid_boundary\": False,\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_train.txt\",\n \"input_height\": 480,\n \"input_width\": 640,\n \"data_path_eval\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path_eval\": 
os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file_eval\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n },\n \"u4k\": {\n \"dataset\": \"u4k\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/u4k\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"mid\": {\n \"dataset\": \"mid\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/middlebury\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/val.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"gta\": {\n \"dataset\": \"gta\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? 
will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"ibims\": {\n \"dataset\": \"ibims\",\n \"ibims_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ibims/ibims1_core_raw/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"sunrgbd\": {\n \"dataset\": \"sunrgbd\",\n \"sunrgbd_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/SUNRGBD/test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 8,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_indoor\": {\n \"dataset\": \"diml_indoor\",\n \"diml_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_indoor_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_outdoor\": {\n \"dataset\": \"diml_outdoor\",\n \"diml_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_outdoor_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 2,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"diode_indoor\": {\n \"dataset\": \"diode_indoor\",\n \"diode_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_indoor/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diode_outdoor\": {\n \"dataset\": \"diode_outdoor\",\n \"diode_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_outdoor/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"hypersim_test\": {\n \"dataset\": \"hypersim_test\",\n \"hypersim_test_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/hypersim_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"vkitti\": {\n \"dataset\": \"vkitti\",\n \"vkitti_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"vkitti2\": {\n \"dataset\": \"vkitti2\",\n \"vkitti2_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti2/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n 
\"max_depth\": 80,\n },\n \"ddad\": {\n \"dataset\": \"ddad\",\n \"ddad_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ddad/ddad_val/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80,\n },\n}" }, { "identifier": "compute_metrics", "path": "zoedepth/utils/misc.py", "snippet": "def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, disp_gt_edges=None, pred_depths=None, **kwargs):\n \"\"\"Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.\n \"\"\"\n if 'config' in kwargs:\n config = kwargs['config']\n garg_crop = config.garg_crop\n eigen_crop = config.eigen_crop\n min_depth_eval = config.min_depth_eval\n max_depth_eval = config.max_depth_eval\n\n if gt.shape[-2:] != pred.shape[-2:] and interpolate:\n pred = nn.functional.interpolate(\n pred.unsqueeze(dim=0).unsqueeze(dim=0), gt.shape[-2:], mode='bilinear', align_corners=True).squeeze()\n\n pred = pred.squeeze().cpu().numpy()\n pred[pred < min_depth_eval] = min_depth_eval\n pred[pred > max_depth_eval] = max_depth_eval\n pred[np.isinf(pred)] = max_depth_eval\n pred[np.isnan(pred)] = min_depth_eval\n\n gt_depth = gt.squeeze().cpu().numpy()\n valid_mask = np.logical_and(\n gt_depth > min_depth_eval, gt_depth < max_depth_eval)\n\n eval_mask = np.ones(valid_mask.shape)\n if garg_crop or eigen_crop:\n gt_height, gt_width = gt_depth.shape\n eval_mask = np.zeros(valid_mask.shape)\n\n if garg_crop:\n eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),\n int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1\n\n elif eigen_crop:\n # print(\"-\"*10, \" EIGEN CROP \", \"-\"*10)\n if dataset == 'kitti':\n eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),\n int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1\n else:\n # assert gt_depth.shape == (480, 640), \"Error: Eigen crop is currently only valid for (480, 640) images\"\n eval_mask[45:471, 41:601] = 1\n else:\n eval_mask = np.ones(valid_mask.shape)\n valid_mask = np.logical_and(valid_mask, eval_mask)\n\n # if dataset == 'nyu':\n # # pred = scale_shift_linear(torch.tensor(pred_depths), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n # pred = scale_shift_linear(torch.tensor(gt), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n \n metrics = compute_errors(gt_depth[valid_mask], pred[valid_mask])\n\n mask = valid_mask.squeeze() # squeeze\n gt = gt_depth\n pred = pred\n see_depth = 0\n if disp_gt_edges is None:\n print(\"Maybe we need edge maps from origin disp!\")\n edges = get_boundaries(gt, th=0.08, dilation=0)\n else:\n edges = disp_gt_edges\n \n mask = np.logical_and(mask, edges)\n import matplotlib.pyplot as plt\n if mask.sum() > 0:\n see_depth = soft_edge_error(pred, gt)[mask].mean()\n metrics['see'] = see_depth\n \n return metrics" }, { "identifier": "get_black_border", "path": "zoedepth/data/preprocess.py", "snippet": "def get_black_border(rgb_image, **kwargs) -> CropParams:\n \"\"\"Crops the black border of the RGB.\n\n Args:\n rgb: RGB image, shape (H, W, 3).\n\n Returns:\n Crop parameters.\n \"\"\"\n\n return get_border_params(rgb_image, value=0, **kwargs)" }, { "identifier": "BaseTrainer", "path": "zoedepth/trainers/base_trainer.py", "snippet": "def is_rank_zero(args):\n def __init__(self, config, 
model, train_loader, test_loader=None, device=None):\n def resize_to_target(self, prediction, target):\n def load_ckpt(self, checkpoint_dir=\"./checkpoints\", ckpt_type=\"best\"):\n def init_optimizer(self):\n def init_scheduler(self):\n def train_on_batch(self, batch, train_step):\n def validate_on_batch(self, batch, val_step):\n def raise_if_nan(self, losses):\n def iters_per_epoch(self):\n def total_iters(self):\n def should_early_stop(self):\n def train(self):\n def stringify_losses(L): return \"; \".join(map(\n def validate(self):\n def save_checkpoint(self, filename):\n def log_images(self, rgb: Dict[str, list] = {}, depth: Dict[str, list] = {}, scalar_field: Dict[str, list] = {}, prefix=\"\", scalar_cmap=\"turbo_r\", min_depth=None, max_depth=None):\n def log_line_plot(self, data):\n def log_bar_plot(self, title, labels, values):\nclass BaseTrainer:" }, { "identifier": "generatemask", "path": "zoedepth/utils/misc.py", "snippet": "def generatemask(size, k_size=-1, sigma=-1, h_factor=0.03, w_factor=0.02):\n # Generates a Guassian mask\n mask = np.zeros(size, dtype=np.float32)\n if sigma == -1:\n sigma = int(size[0]/16)\n if k_size == -1:\n k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)\n # mask[int(0.02*size[0]):size[0] - int(0.02*size[0]), int(0.015*size[1]): size[1] - int(0.015*size[1])] = 1\n mask[int(h_factor*size[0]):size[0] - int(h_factor*size[0]), int(w_factor*size[1]): size[1] - int(w_factor*size[1])] = 1\n mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)\n mask = (mask - mask.min()) / (mask.max() - mask.min())\n mask = mask.astype(np.float32)\n return mask" } ]
import os import torch import torch.cuda.amp as amp import torch.nn as nn import numpy as np import wandb import uuid import torch.distributed as dist import copy import torch.optim as optim import matplotlib.pyplot as plt from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss from zoedepth.utils.config import DATASETS_CONFIG from zoedepth.utils.misc import compute_metrics from zoedepth.data.preprocess import get_black_border from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten from torchvision import transforms from PIL import Image from tqdm import tqdm from datetime import datetime as dt from zoedepth.utils.misc import generatemask
15,627
with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] 
depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else: self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth) self.scale_target = config.get("scale_target", None) self.consistency_training = config.get("consistency_training", False) if self.consistency_training: self.consistency_target = config.get("consistency_target", None) self.consistency_loss = ConsistencyLoss(self.consistency_target, config.get("focus_flatten", False), config.get("w_p", 1.0)) print("current weight for consistency loss is {}. focus_flatten is {}. 
w_p is {}".format(self.config.w_consistency, config.get("focus_flatten", False), config.get("w_p", 1.0))) def train_on_batch(self, batch, train_step, step_rate): """ Expects a batch of images and depth as input batch["image"].shape : batch_size, c, h, w batch["depth"].shape : batch_size, 1, h, w """ images, depths_gt = batch['image'].to(self.device), batch['depth'].to(self.device) image_raw = batch.get("image_raw", None) if image_raw is not None: image_raw = image_raw.to(self.device) sample_points = None if self.sampled_training: sample_points = batch['sample_points'].to(self.device) bbox = batch.get("bbox", None) if bbox is not None: bbox = bbox.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) depth_raw = batch.get("depth_raw", None) if depth_raw is not None: depth_raw = depth_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) shift = batch.get("shift", None) if shift is not None: shift = shift.to(self.device) dataset = batch['dataset'][0] b, c, h, w = images.size() mask = batch["mask"].to(self.device).to(torch.bool) sample_mask = batch.get("sample_mask", None) if sample_mask is not None: sample_mask = sample_mask.to(self.device).to(torch.bool) mask_raw = batch.get("mask_raw", None) if mask_raw is not None: mask_raw = mask_raw.to(self.device).to(torch.bool) losses = {} with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: output = self.model(images, sample_points, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) else: output = self.model(images, None, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) if self.boostingdepth: if self.lazy_epoch < self.epoch: output.update_learning_rate() self.lazy_epoch = self.epoch input_dict = dict() input_dict['data_gtfake'] = depths_gt output.set_input_train_gt(input_dict) output.optimize_parameters() pred_depths = output.fake_B pred = output.fake_B # print(torch.min(pred), torch.max(pred)) losses = output.get_current_losses() else: pred_depths = output['metric_depth'] if self.sampled_training: sampled_depth_gt = sample_points[:, :, -1].float().unsqueeze(dim=-1) sampled_depth_gt = sampled_depth_gt.permute(0, 2, 1) if self.config.get("representation", "") == 'biLaplacian': # only for sampled training for now l_dist, l_si = self.distribution_loss(output, sampled_depth_gt, mask=sample_mask) loss = self.config.w_dist * l_dist + self.config.w_si * l_si losses['distribution_loss'] = l_dist losses['sigloss'] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.sampled_training: l_si = self.silog_loss( pred_depths, sampled_depth_gt, mask=sample_mask) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, 
interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.multi_consistency: #### here here here pred_depths, coarse, fine = output['metric_depth'], output['coarse_depth_pred'], output['fine_depth_pred'] if self.consistency_training: depths_gt = torch.split(depths_gt, 1, dim=1) depths_gt = torch.cat(depths_gt, dim=0) mask = torch.split(mask, 1, dim=-1) mask = torch.cat(mask, dim=0).permute(0, 3, 1, 2) mask_raw = torch.cat([mask_raw, mask_raw], dim=0) depth_raw = torch.cat([depth_raw, depth_raw], dim=0) temp_features = output.get('temp_features', None) l_si_1, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_f, pred_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses[self.silog_loss.name] = l_si_1 losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c # loss = l_si_1 + l_si_f + l_si_c loss = l_si_1 if self.consistency_training: try: # depths_gt? pred_f? l_consistency = self.consistency_loss(pred, shift, mask, temp_features, pred_f=depths_gt) # use the resized pred except RuntimeError as e: print(e) print("some runtime error here! Hack with 0") l_consistency = torch.Tensor([0]).squeeze() losses[self.consistency_loss.name] = l_consistency loss += l_consistency * self.config.w_consistency else: l_si, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.dynamic: if step_rate > self.dynamic_unupdate_rate: warm_up_rate = min(1.0, (step_rate - self.dynamic_unupdate_rate) / 0.02) flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=warm_up_rate) loss += self.config.w_flop * flop_cost losses['flop_loss'] = flop_cost else: flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=1) loss += 0 * flop_cost losses['flop_loss'] = flop_cost if self.use_scale_loss: if self.scale_target == 'coarse': h_loss = self.scale_loss(pred_depths, output['coarse_depth_pred_roi'], mask, interpolate=True) else: h_loss = self.scale_loss(pred_depths, depths_gt, mask, interpolate=True) loss += self.config.w_scale * h_loss losses['scale_loss'] = h_loss # self.scaler.scale(loss).backward() # if self.config.clip_grad > 0: # self.scaler.unscale_(self.optimizer) # nn.utils.clip_grad_norm_( # self.model.parameters(), self.config.clip_grad) # self.scaler.step(self.optimizer) # self.scaler.update() # self.optimizer.zero_grad() self.scaler.scale(loss).backward() if self.config.clip_grad > 0: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_( self.model.parameters(), self.config.clip_grad) self.scaler.step(self.optimizer) self.scaler.update() self.optimizer.zero_grad() if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0: if self.config.get("debug", False): pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] plt.imshow(pred.squeeze().detach().cpu().numpy()) plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) 
images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, 
mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = 
DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None
wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root,
10
2023-12-04 08:43:15+00:00
24k
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" }, { "identifier": "GeodreamGeometryVolume", "path": "threestudio/models/geometry/geodream_geometry_volume.py", "snippet": "class GeodreamGeometryVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n init_volume_path: str = \"con_volume_lod0.pth\"\n one2345_weight: str = \"pretrain.pth\"\n sdf_network_grad: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n \n \n self.sdf_layers = SdfLayer()\n self.deviation_network = SingleVarianceNetwork(self.cfg.one2345_weight)\n\n # sdf_layers weight\n sdf_layers_weight = torch.load(self.cfg.one2345_weight)['sdf_network_lod0']\n selected_state_dict = {}\n prefix = 'sdf_layer'\n for key, value in sdf_layers_weight.items():\n if key.startswith(prefix):\n selected_state_dict[key[10:]] = value# key need remove sdf_layer prefix\n self.sdf_layers.load_state_dict(selected_state_dict)\n print(\"sdf_layers is loading weight at \" + self.cfg.one2345_weight)\n \n # sdf_layers freeze \n if self.cfg.sdf_network_grad:\n print(\"sdf_layers network is training\")\n else:\n for p in self.sdf_layers.parameters():\n p.requires_grad_(False)\n print(\"sdf_layers network is freezeing\")\n\n # volume weight\n volume_weight = torch.load(self.cfg.init_volume_path)\n\n self.volume = nn.Parameter(volume_weight, requires_grad=True)\n print(\"volume network is loading weight at \" + self.cfg.init_volume_path)\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n 
* (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], viewdirs, dists, output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n \n sdf, feature_vector = self.sdf(points.view(-1, self.cfg.n_input_dims))\n\n output = {\n \"density\": sdf,\n }\n \n g = self.gradient(points.view(-1, self.cfg.n_input_dims))\n alphas = self.get_alpha(points.view(-1, self.cfg.n_input_dims), viewdirs, dists, feature_vector, sdf, g)\n output.update({\"ALPHA\": alphas})\n\n \n points_norm = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n \n enc = self.encoding(points_norm.view(-1, self.cfg.n_input_dims))\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n \n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n density, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n density = density.reshape(*points.shape[:-1], 1)\n return density\n \n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n sdf, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n sdf = sdf.reshape(*points.shape[:-1], 1)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n \n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"GeodreamGeometryVolume\":\n if isinstance(other, GeodreamGeometryVolume):\n instance = GeodreamGeometryVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n 
other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {GeodreamGeometryVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def forward_sdf(self, pts):\n sdf, _ = self.sdf(pts)\n return sdf\n \n def sdf(self, pts, lod=0):\n conditional_volume = self.volume\n num_pts = pts.shape[0]\n device = pts.device\n pts_ = pts.clone()\n pts = pts.view(1, 1, 1, num_pts, 3) # - should be in range (-1, 1)\n\n pts = torch.flip(pts, dims=[-1])\n sampled_feature = grid_sample_3d(conditional_volume, pts) # [1, c, 1, 1, num_pts]\n sampled_feature = sampled_feature.view(-1, num_pts).permute(1, 0).contiguous().to(device)\n\n sdf_pts = self.sdf_layers(pts_, sampled_feature)\n\n return sdf_pts[:, :1], sdf_pts[:, 1:]\n \n def get_alpha(self, ray_samples, rays_d, dists, feature_vector, sdf=None, gradients=None):\n \"\"\"compute alpha from sdf as in NeuS\"\"\"\n inv_variance = self.deviation_network(feature_vector)[:, :1].clip(1e-6, 1e6) # Single parameter\n\n \n #gradients = torch.ones_like(rays_d, requires_grad=False, device=rays_d.device)\n true_dot_val = (rays_d * gradients).sum(-1, keepdim=True) # * calculate\n alpha_inter_ratio = 0.0 \n iter_cos = -(F.relu(-true_dot_val * 0.5 + 0.5) * (1.0 - alpha_inter_ratio) + F.relu(\n -true_dot_val) * alpha_inter_ratio) # always non-positive\n\n true_estimate_sdf_half_next = sdf + iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n true_estimate_sdf_half_prev = sdf - iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(true_estimate_sdf_half_prev * inv_variance)\n next_cdf = torch.sigmoid(true_estimate_sdf_half_next * inv_variance)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)\n\n return alpha\n \n def gradient(self, x):\n \n x.requires_grad_(True)\n with torch.enable_grad():\n sdf, _ = self.sdf(x)\n y = sdf\n\n d_output = torch.ones_like(y, requires_grad=False, device=y.device)\n # ! Distributed Data Parallel doesn’t work with torch.autograd.grad()\n # ! (i.e. it will only work if gradients are to be accumulated in .grad attributes of parameters).\n gradients = torch.autograd.grad(\n outputs=y,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradients" } ]
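The context field closes here; every geometry snippet it contains (ImplicitSDF, ImplicitVolume, GeodreamGeometryVolume) routes query points through contract_to_unisphere before hash-grid encoding. Below is a minimal sketch of that normalization, rewritten with plain torch in place of the repo's scale_tensor helper and jaxtyping annotations; the test values at the bottom are illustrative only.

import torch

def contract_to_unisphere(x: torch.Tensor, bbox: torch.Tensor, unbounded: bool = False) -> torch.Tensor:
    # bbox rows are [min_corner, max_corner]; map bbox coordinates to [0, 1]
    x = (x - bbox[0]) / (bbox[1] - bbox[0])
    if unbounded:
        x = x * 2 - 1                              # bbox now spans [-1, 1]
        mag = x.norm(dim=-1, keepdim=True)
        mask = mag.squeeze(-1) > 1                 # points outside the unit sphere
        x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])  # contract outside points to radius < 2
        x = x / 4 + 0.5                            # contracted space mapped back to [0, 1]
    return x

bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
print(contract_to_unisphere(torch.randn(4, 3) * 3.0, bbox, unbounded=True))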
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume
from threestudio.utils.typing import *
from pysdf import SDF
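Both ImplicitSDF.initialize_shape in the context and TetrahedraSDFGrid.initialize_shape in the code below use trimesh plus pysdf (imported above) to build a ground-truth SDF from a mesh, negating pysdf's output because it reports positive distance inside the shape. A condensed sketch of that recipe, with the up/front axis realignment omitted and the mesh path left as a placeholder:

import numpy as np
import torch
import trimesh
from pysdf import SDF

def make_mesh_gt_sdf(mesh_path: str, target_scale: float = 0.9):
    # load the mesh; a Scene may hold several geometries, so merge them
    scene = trimesh.load(mesh_path)
    mesh = scene if isinstance(scene, trimesh.Trimesh) else trimesh.util.concatenate(list(scene.geometry.values()))

    # center the vertices and rescale so the largest coordinate magnitude equals target_scale
    mesh.vertices = mesh.vertices - mesh.vertices.mean(0)
    mesh.vertices = mesh.vertices / np.abs(mesh.vertices).max() * target_scale

    sdf = SDF(mesh.vertices, mesh.faces)

    def gt_sdf(points: torch.Tensor) -> torch.Tensor:
        # pysdf is positive inside the shape, so negate to get a conventional SDF
        return torch.from_numpy(-sdf(points.detach().cpu().numpy())).to(points)[..., None]

    return gt_sdf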
18340
mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
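The cropped code ends with TetrahedraSDFGrid copying grid_level values straight into its per-vertex SDF parameter, whereas ImplicitSDF.initialize_shape (in the context) has to fit its network to the ground-truth SDF with 1000 Adam steps over random points. A minimal sketch of that fitting loop, with a toy MLP standing in for the hash-grid encoding plus sdf_network (the architecture here is illustrative, not the repo's):

import torch
import torch.nn as nn
import torch.nn.functional as F

def fit_sdf_network(get_gt_sdf, steps: int = 1000, batch: int = 10000, device: str = "cpu"):
    # toy stand-in for encoding + sdf_network: a 3 -> 1 MLP
    net = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 1)).to(device)
    optim = torch.optim.Adam(net.parameters(), lr=1e-3)
    for _ in range(steps):
        # sample points uniformly in [-1, 1]^3, as initialize_shape does
        pts = torch.rand(batch, 3, device=device) * 2.0 - 1.0
        loss = F.mse_loss(net(pts), get_gt_sdf(pts))
        optim.zero_grad()
        loss.backward()
        optim.step()
    return net

# example: fit to the SDF of a sphere of radius 0.5
sphere_sdf = lambda p: p.norm(dim=-1, keepdim=True) - 0.5
net = fit_sdf_network(sphere_sdf, steps=200)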
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
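The full file above delegates mesh extraction to MarchingTetrahedraHelper (see the context field); the vertices it emits come from a linear interpolation to the SDF zero crossing along every sign-changing tetrahedron edge. A tiny sketch of that weighting, matching the flip-and-normalize arithmetic in the helper's _forward:

import torch

def edge_zero_crossing(p0, p1, s0, s1):
    # p0, p1: edge endpoints; s0, s1: their signed distances, assumed to have opposite signs
    w0 = -s1 / (s0 - s1)   # weight on p0
    w1 = s0 / (s0 - s1)    # weight on p1
    return p0 * w0 + p1 * w1

# SDF goes from -0.2 at p0 to +0.6 at p1, so the surface sits a quarter of the way along the edge
p0 = torch.tensor([0.0, 0.0, 0.0])
p1 = torch.tensor([1.0, 0.0, 0.0])
print(edge_zero_crossing(p0, p1, torch.tensor(-0.2), torch.tensor(0.6)))  # -> x = 0.25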
elif isinstance(other, ImplicitSDF) or isinstance(other, GeodreamGeometryVolume):
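The target completion dispatches on GeodreamGeometryVolume, whose snippet in the context converts SDF samples to per-sample opacity NeuS-style in get_alpha. A condensed sketch of that conversion, assuming alpha_inter_ratio = 0 as in the snippet and taking inv_variance as an argument instead of reading it from the learned SingleVarianceNetwork:

import torch
import torch.nn.functional as F

def neus_alpha(sdf, grad, ray_d, dists, inv_variance):
    # sdf, dists, inv_variance: (N, 1); grad, ray_d: (N, 3)
    true_cos = (ray_d * grad).sum(-1, keepdim=True)
    iter_cos = -F.relu(-true_cos * 0.5 + 0.5)               # always non-positive
    half = iter_cos.clamp(-10.0, 10.0) * dists * 0.5
    prev_cdf = torch.sigmoid((sdf - half) * inv_variance)   # sigmoid of estimated SDF half a step before the sample
    next_cdf = torch.sigmoid((sdf + half) * inv_variance)   # sigmoid of estimated SDF half a step after the sample
    return ((prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)).clamp(0.0, 1.0)

sdf = torch.tensor([[0.05]]); grad = torch.tensor([[0.0, 0.0, 1.0]])
ray_d = torch.tensor([[0.0, 0.0, -1.0]]); dists = torch.tensor([[0.02]])
print(neus_alpha(sdf, grad, ray_d, dists, torch.tensor([[64.0]])))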
3
2023-12-01 01:59:42+00:00
24k
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample\n shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented\n for all models (such as downloading or saving).\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.\n center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.\n flip_sin_to_cos (`bool`, *optional*, defaults to `False`):\n Whether to flip the sin to cos in the time embedding.\n freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n mid_block_type (`str`, *optional*, defaults to `\"UNetMidBlock2DCrossAttn\"`):\n Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or\n `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\")`):\n The tuple of upsample blocks to use.\n only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):\n Whether to include self-attention in the basic transformer blocks, see\n [`~models.attention.BasicTransformerBlock`].\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, normalization and activation layers is skipped in post-processing.\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):\n The dimension of the cross attention features.\n transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):\n The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. 
Only relevant for\n [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],\n [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].\n encoder_hid_dim (`int`, *optional*, defaults to None):\n If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`\n dimension to `cross_attention_dim`.\n encoder_hid_dim_type (`str`, *optional*, defaults to `None`):\n If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text\n embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n num_attention_heads (`int`, *optional*):\n The number of attention heads. If not defined, defaults to `attention_head_dim`\n resnet_time_scale_shift (`str`, *optional*, defaults to `\"default\"`): Time scale shift config\n for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.\n class_embed_type (`str`, *optional*, defaults to `None`):\n The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,\n `\"timestep\"`, `\"identity\"`, `\"projection\"`, or `\"simple_projection\"`.\n addition_embed_type (`str`, *optional*, defaults to `None`):\n Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or\n \"text\". \"text\" will use the `TextTimeEmbedding` layer.\n addition_time_embed_dim: (`int`, *optional*, defaults to `None`):\n Dimension for the timestep embeddings.\n num_class_embeds (`int`, *optional*, defaults to `None`):\n Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing\n class conditioning with `class_embed_type` equal to `None`.\n time_embedding_type (`str`, *optional*, defaults to `positional`):\n The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.\n time_embedding_dim (`int`, *optional*, defaults to `None`):\n An optional override for the dimension of the projected time embedding.\n time_embedding_act_fn (`str`, *optional*, defaults to `None`):\n Optional activation function to use only once on the time embeddings before they are passed to the rest of\n the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.\n timestep_post_act (`str`, *optional*, defaults to `None`):\n The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.\n time_cond_proj_dim (`int`, *optional*, defaults to `None`):\n The dimension of `cond_proj` layer in the timestep embedding.\n conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.\n conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.\n projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when\n `class_embed_type=\"projection\"`. Required when `class_embed_type=\"projection\"`.\n class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time\n embeddings with the class embeddings.\n mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):\n Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If\n `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the\n `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`\n otherwise.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n mid_block_type: Optional[str] = \"UNetMidBlock2DCrossAttn\",\n up_block_types: Tuple[str] = (\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\"),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: Union[int, Tuple[int]] = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n dropout: float = 0.0,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: Union[int, Tuple[int]] = 1280,\n transformer_layers_per_block: Union[int, Tuple[int]] = 1,\n encoder_hid_dim: Optional[int] = None,\n encoder_hid_dim_type: Optional[str] = None,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n num_attention_heads: Optional[Union[int, Tuple[int]]] = None,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n addition_embed_type: Optional[str] = None,\n addition_time_embed_dim: Optional[int] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n resnet_skip_time_act: bool = False,\n resnet_out_scale_factor: int = 1.0,\n time_embedding_type: str = \"positional\",\n time_embedding_dim: Optional[int] = None,\n time_embedding_act_fn: Optional[str] = None,\n timestep_post_act: Optional[str] = None,\n time_cond_proj_dim: Optional[int] = None,\n conv_in_kernel: int = 3,\n conv_out_kernel: int = 3,\n projection_class_embeddings_input_dim: Optional[int] = None,\n attention_type: str = \"default\",\n class_embeddings_concat: bool = False,\n mid_block_only_cross_attention: Optional[bool] = None,\n cross_attention_norm: Optional[str] = None,\n addition_embed_type_num_heads=64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n\n if num_attention_heads is not None:\n raise ValueError(\n \"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.\"\n )\n\n # If `num_attention_heads` is not defined (which is the case for most models)\n # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.\n # The reason for this behavior is to correct for incorrectly named variables that were introduced\n # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131\n # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking\n # which is why we correct for the naming here.\n num_attention_heads = num_attention_heads or attention_head_dim\n\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n if time_embedding_type == \"fourier\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 2\n if time_embed_dim % 2 != 0:\n raise ValueError(f\"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.\")\n self.time_proj = GaussianFourierProjection(\n time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos\n )\n timestep_input_dim = time_embed_dim\n elif time_embedding_type == \"positional\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n else:\n raise ValueError(\n f\"{time_embedding_type} does not exist. 
Please make sure to use one of `fourier` or `positional`.\"\n )\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n post_act_fn=timestep_post_act,\n cond_proj_dim=time_cond_proj_dim,\n )\n\n if encoder_hid_dim_type is None and encoder_hid_dim is not None:\n encoder_hid_dim_type = \"text_proj\"\n self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)\n logger.info(\"encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.\")\n\n if encoder_hid_dim is None and encoder_hid_dim_type is not None:\n raise ValueError(\n f\"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.\"\n )\n\n if encoder_hid_dim_type == \"text_proj\":\n self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)\n elif encoder_hid_dim_type == \"text_image_proj\":\n # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image_proj\"` (Kadinsky 2.1)`\n self.encoder_hid_proj = TextImageProjection(\n text_embed_dim=encoder_hid_dim,\n image_embed_dim=cross_attention_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2\n self.encoder_hid_proj = ImageProjection(\n image_embed_dim=encoder_hid_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type is not None:\n raise ValueError(\n f\"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'.\"\n )\n else:\n self.encoder_hid_proj = None\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. 
it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif class_embed_type == \"simple_projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n if addition_embed_type == \"text\":\n if encoder_hid_dim is not None:\n text_time_embedding_from_dim = encoder_hid_dim\n else:\n text_time_embedding_from_dim = cross_attention_dim\n\n self.add_embedding = TextTimeEmbedding(\n text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads\n )\n elif addition_embed_type == \"text_image\":\n # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image\"` (Kadinsky 2.1)`\n self.add_embedding = TextImageTimeEmbedding(\n text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim\n )\n elif addition_embed_type == \"text_time\":\n self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)\n self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif addition_embed_type == \"image\":\n # Kandinsky 2.2\n self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 ControlNet\n self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type is not None:\n raise ValueError(f\"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.\")\n\n if time_embedding_act_fn is None:\n self.time_embed_act = None\n else:\n self.time_embed_act = get_activation(time_embedding_act_fn)\n\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = only_cross_attention\n\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = False\n\n if isinstance(num_attention_heads, int):\n num_attention_heads = (num_attention_heads,) * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n if isinstance(cross_attention_dim, int):\n cross_attention_dim = (cross_attention_dim,) * len(down_block_types)\n\n if isinstance(layers_per_block, int):\n layers_per_block = [layers_per_block] * len(down_block_types)\n\n if isinstance(transformer_layers_per_block, int):\n transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)\n\n if class_embeddings_concat:\n # The time embeddings are concatenated with the class embeddings. 
The dimension of the\n # time embeddings passed to the down, middle, and up blocks is twice the dimension of the\n # regular time embeddings\n blocks_time_embed_dim = time_embed_dim * 2\n else:\n blocks_time_embed_dim = time_embed_dim\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block[i],\n transformer_layers_per_block=transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=blocks_time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim[i],\n num_attention_heads=num_attention_heads[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock2DCrossAttn\":\n self.mid_block = UNetMidBlock2DCrossAttn(\n transformer_layers_per_block=transformer_layers_per_block[-1],\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim[-1],\n num_attention_heads=num_attention_heads[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n attention_type=attention_type,\n )\n elif mid_block_type == \"UNetMidBlock2DSimpleCrossAttn\":\n self.mid_block = UNetMidBlock2DSimpleCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim[-1],\n attention_head_dim=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n only_cross_attention=mid_block_only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif mid_block_type is None:\n self.mid_block = None\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_num_attention_heads = list(reversed(num_attention_heads))\n reversed_layers_per_block = list(reversed(layers_per_block))\n reversed_cross_attention_dim = list(reversed(cross_attention_dim))\n reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))\n only_cross_attention = list(reversed(only_cross_attention))\n\n output_channel = reversed_block_out_channels[0]\n for i, 
up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=reversed_layers_per_block[i] + 1,\n transformer_layers_per_block=reversed_transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=blocks_time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=reversed_cross_attention_dim[i],\n num_attention_heads=reversed_num_attention_heads[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n\n self.conv_act = get_activation(act_fn)\n\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n if attention_type in [\"gated\", \"gated-text-image\"]:\n positive_len = 768\n if isinstance(cross_attention_dim, int):\n positive_len = cross_attention_dim\n elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):\n positive_len = cross_attention_dim[0]\n\n feature_type = \"text-only\" if attention_type == \"gated\" else \"text-image\"\n self.position_net = PositionNet(\n positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type\n )\n\n @property\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"get_processor\"):\n processors[f\"{name}.processor\"] = module.get_processor(return_deprecated_lora=True)\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(\n self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False\n ):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or 
only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor, _remove_lora=_remove_lora)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"), _remove_lora=_remove_lora)\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_default_attn_processor(self):\n \"\"\"\n Disables custom attention processors and sets the default attention implementation.\n \"\"\"\n if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnAddedKVProcessor()\n elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnProcessor()\n else:\n raise ValueError(\n f\"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}\"\n )\n\n self.set_attn_processor(processor, _remove_lora=True)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module splits the input tensor in slices to compute attention in\n several steps. This is useful for saving some memory in exchange for a small decrease in speed.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, input to the attention heads is halved, so attention is computed in two steps. If\n `\"max\"`, maximum amount of memory is saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if hasattr(module, \"gradient_checkpointing\"):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n quick_replicate: bool = False,\n replicate_prv_feature: Optional[List[torch.Tensor]] = None,\n cache_layer_id: Optional[int] = None,\n cache_block_id: Optional[int] = None,\n return_dict: bool = True,\n ) -> Union[UNet2DConditionOutput, Tuple]:\n r\"\"\"\n The [`UNet2DConditionModel`] forward method.\n\n Args:\n sample (`torch.FloatTensor`):\n The noisy input tensor with the following shape `(batch, channel, height, width)`.\n timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.\n encoder_attention_mask (`torch.Tensor`):\n A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. 
If\n `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,\n which adds large negative values to the attention scores corresponding to \"discard\" tokens.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].\n added_cond_kwargs: (`dict`, *optional*):\n A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that\n are passed along to the UNet blocks.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise\n a `tuple` is returned where the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None:\n encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=sample.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n aug_emb = None\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # there might be better ways to encapsulate this.\n class_labels = class_labels.to(dtype=sample.dtype)\n\n class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)\n\n if self.config.class_embeddings_concat:\n emb = torch.cat([emb, class_emb], dim=-1)\n else:\n emb = emb + class_emb\n\n if self.config.addition_embed_type == \"text\":\n aug_emb = self.add_embedding(encoder_hidden_states)\n elif self.config.addition_embed_type == \"text_image\":\n # Kandinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n text_embs = added_cond_kwargs.get(\"text_embeds\", encoder_hidden_states)\n aug_emb = self.add_embedding(text_embs, image_embs)\n elif self.config.addition_embed_type == \"text_time\":\n # SDXL - style\n if \"text_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`\"\n )\n text_embeds = added_cond_kwargs.get(\"text_embeds\")\n if \"time_ids\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`\"\n )\n time_ids = added_cond_kwargs.get(\"time_ids\")\n time_embeds = self.add_time_proj(time_ids.flatten())\n time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))\n\n add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)\n add_embeds = add_embeds.to(emb.dtype)\n aug_emb = self.add_embedding(add_embeds)\n elif self.config.addition_embed_type == \"image\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n aug_emb = self.add_embedding(image_embs)\n elif 
self.config.addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs or \"hint\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n hint = added_cond_kwargs.get(\"hint\")\n aug_emb, hint = self.add_embedding(image_embs, hint)\n sample = torch.cat([sample, hint], dim=1)\n\n emb = emb + aug_emb if aug_emb is not None else emb\n\n if self.time_embed_act is not None:\n emb = self.time_embed_act(emb)\n\n if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_proj\":\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_image_proj\":\n # Kadinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(image_embeds)\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 2.5 GLIGEN position net\n if cross_attention_kwargs is not None and cross_attention_kwargs.get(\"gligen\", None) is not None:\n cross_attention_kwargs = cross_attention_kwargs.copy()\n gligen_args = cross_attention_kwargs.pop(\"gligen\")\n cross_attention_kwargs[\"gligen\"] = {\"objs\": self.position_net(**gligen_args)}\n\n # 3. 
down\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n if quick_replicate and replicate_prv_feature is not None:\n # Down\n for i, downsample_block in enumerate(self.down_blocks):\n if i > cache_layer_id:\n break\n\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n exist_block_number=cache_block_id if i == cache_layer_id else None,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n # No Middle\n # Up\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n sample = replicate_prv_feature\n #down_block_res_samples = down_block_res_samples[:-1]\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n\n for i, upsample_block in enumerate(self.up_blocks):\n if i < len(self.up_blocks) - 1 - cache_layer_id:\n continue\n\n if i == len(self.up_blocks) - 1 - cache_layer_id:\n trunc_upsample_block = cache_block_id + 1\n else:\n trunc_upsample_block = len(upsample_block.resnets)\n\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-trunc_upsample_block:]\n down_block_res_samples = down_block_res_samples[: -trunc_upsample_block]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n sample, _ = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n enter_block_number=cache_block_id if i == len(self.up_blocks) - 1 - cache_layer_id else None,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n \n prv_f = replicate_prv_feature\n else:\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and 
len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n )\n # To support T2I-Adapter-XL\n if (\n is_adapter\n and len(down_block_additional_residuals) > 0\n and sample.shape == down_block_additional_residuals[0].shape\n ):\n sample += down_block_additional_residuals.pop(0)\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n if cache_block_id is not None:\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n #print(cache_block_id, cache_layer_id)\n prv_f = None\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample, current_record_f = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n current_record_f = None\n\n #print(\"Append prv_feature with shape:\", sample.shape)\n if cache_layer_id is not None and current_record_f is not None and i == len(self.up_blocks) - cache_layer_id - 1:\n prv_f = current_record_f[-cache_block_id-1]\n \n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n if not return_dict:\n return (sample, prv_f,)\n \n return UNet2DConditionOutput(sample=sample)" }, { "identifier": "StableDiffusionPipeline", "path": "DeepCache/sd/pipeline_stable_diffusion.py", "snippet": "EXAMPLE_DOC_STRING = \"\"\"\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16)\n >>> pipe = pipe.to(\"cuda\")\n\n >>> prompt = \"a photo of an astronaut riding a horse on mars\"\n >>> image = pipe(prompt).images[0]\n ```\n\"\"\"\ndef sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):\ndef sample_from_quad(total_numbers, n_samples, pow=1.2):\ndef sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):\ndef rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n def enable_vae_slicing(self):\n def disable_vae_slicing(self):\n def enable_vae_tiling(self):\n def disable_vae_tiling(self):\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def run_safety_checker(self, image, device, dtype):\n def decode_latents(self, latents):\n def prepare_extra_step_kwargs(self, generator, eta):\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n ):\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n cache_interval: int = 1,\n cache_layer_id: int = None,\n cache_block_id: int = None,\n uniform: bool = True,\n pow: float = None,\n center: int = None,\n output_all_sequence: bool = False,\n ):\nclass StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, 
FromSingleFileMixin):" } ]
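For orientation, a minimal usage sketch of the DeepCache StableDiffusionPipeline whose `__call__` signature appears in the context snippet above. The checkpoint name mirrors the snippet's own example; the concrete `cache_interval`, `cache_layer_id`, and `cache_block_id` values are illustrative assumptions, not settings taken from the repository.

import torch
from DeepCache.sd.pipeline_stable_diffusion import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse on mars",
    num_inference_steps=50,
    cache_interval=5,     # run the full UNet only every few steps (illustrative value)
    cache_layer_id=0,     # which block level supplies the cached feature (assumption)
    cache_block_id=0,
    uniform=True,         # evenly spaced full steps; non-uniform mode uses sample_from_quad_center
).images[0]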
import copy

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Callable, List, Optional, Union

from torch.nn.functional import grid_sample
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL
from .unet_2d_condition import UNet2DConditionModel
from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput
from diffusers.utils.torch_utils import randn_tensor
17098
def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers,
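As a quick sanity check on the motion-field helpers defined in the snippet above (coords_grid, warp_single_latent, create_motion_field_and_warp_latents), a minimal sketch with assumed latent shapes; the strength value 12 and the 64x64 latent size are illustrative assumptions, not values taken from the source.

import torch

frames, channels, h, w = 4, 4, 64, 64           # assumed 64x64 latents for 512x512 frames
latents = torch.randn(frames, channels, h, w)

warped = create_motion_field_and_warp_latents(
    motion_field_strength_x=12,                 # per-frame x offset grows with frame_ids
    motion_field_strength_y=12,
    frame_ids=list(range(frames)),
    latents=latents,
)
assert warped.shape == latents.shape            # warping preserves the latent shape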
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100): samples = [] while len(samples) < sample_size: # Sample from a Gaussian centered at n/2 sample = int(np.random.normal(loc=n/2, scale=std_dev)) # Check if the sample is in bounds if 1 <= sample < n and sample not in samples: samples.append(sample) return samples def sample_from_quad(total_numbers, n_samples, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1) # Raise these values to the power of 1.5 to get a non-linear distribution indices = np.unique(np.int32(x_values**pow))[:-1] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1) indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
1
2023-12-01 10:54:04+00:00
24k
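The key idea in the CrossFrameAttnProcessor variants above is that, inside self-attention layers, every frame's keys and values are replaced by those of the first frame of its batch item, which is what keeps the generated frames temporally consistent. Below is a minimal sketch of that key/value rewiring on plain tensors; the function name and toy shapes are illustrative stand-ins rather than the pipeline's actual code, but the indexing mirrors the rearrange_3/rearrange_4 path above.

```python
import torch

def cross_frame_kv(key, value, batch_size):
    # key/value: (batch_size * frames, tokens, channels), frames stacked per batch item
    frames = key.shape[0] // batch_size
    # regroup to (batch, frames, tokens, channels), mirroring rearrange_3
    key = key.reshape(batch_size, frames, *key.shape[1:])
    value = value.reshape(batch_size, frames, *value.shape[1:])
    # every frame now attends to frame 0 of its own batch item
    key = key[:, [0] * frames]
    value = value[:, [0] * frames]
    # flatten back to (batch * frames, tokens, channels), mirroring rearrange_4
    return key.reshape(-1, *key.shape[2:]), value.reshape(-1, *value.shape[2:])

# toy check: 2 batch items (cond/uncond for classifier-free guidance), 4 frames, 8 tokens, 16 channels
k = torch.randn(2 * 4, 8, 16)
v = torch.randn(2 * 4, 8, 16)
k2, v2 = cross_frame_kv(k, v, batch_size=2)
assert torch.equal(k2[3], k[0])  # all frames of the first batch item reuse its frame 0
assert torch.equal(k2[7], k[4])  # all frames of the second batch item reuse its frame 0
```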
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
15,252
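The TetrahedraSDFGrid code in this record repeatedly remaps coordinates between ranges: grid vertices are mapped from the marching-tetrahedra helper's points_range into isosurface_bbox, and query points are squashed into the [0, 1] cube by contract_to_unisphere before hash-grid encoding. The sketch below is a simplified stand-in for that affine rescaling (the real scale_tensor also accepts tensor-valued ranges and handles the unbounded case):

```python
import torch

def rescale(x, src, dst):
    # affine-map x from the src range to the dst range per axis,
    # analogous in spirit to scale_tensor(x, src, dst)
    src_min, src_max = src
    dst_min, dst_max = dst
    x = (x - src_min) / (src_max - src_min)  # normalize to [0, 1]
    return x * (dst_max - dst_min) + dst_min

# grid vertices defined on [0, 1]^3 mapped into a radius-1 isosurface bbox
grid = torch.tensor([[0.0, 0.5, 1.0]])
print(rescale(grid, (0.0, 1.0), (-1.0, 1.0)))  # tensor([[-1.,  0.,  1.]])
```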
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-27 02:39:39+00:00
24k
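The threestudio sample above ends inside `TetrahedraSDFGrid.create_from`, and its held-out next line (`elif isinstance(other, ImplicitVolume):`) opens the branch for converting implicit geometries. As a small self-contained illustration of a piece that is fully visible in the sample, the sketch below restates the two analytic SDF initializers that `initialize_shape()` builds for `shape_init == "sphere"` and `shape_init == "ellipsoid"`. The wrapper function names and the test points are illustrative assumptions; only `torch` is required.

import torch

# Minimal standalone sketch (not part of the sample above) of the analytic
# pseudo-SDF initializers used by initialize_shape().

def sphere_sdf(points: torch.Tensor, radius: float) -> torch.Tensor:
    # distance to the origin minus the radius; negative inside, positive outside
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius

def ellipsoid_pseudo_sdf(points: torch.Tensor, size: torch.Tensor) -> torch.Tensor:
    # pseudo signed distance: rescale by the semi-axes, then treat as a unit sphere
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

pts = torch.randn(4, 3)                                         # illustrative query points
print(sphere_sdf(pts, radius=0.5).shape)                        # torch.Size([4, 1])
print(ellipsoid_pseudo_sdf(pts, torch.tensor([0.5, 0.3, 0.2])).shape)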
EricGuo5513/momask-codes
eval_t2m_trans_res.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n clip_version=None, opt=None, **kargs):\n super(MaskTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n elif self.cond_mode == 'uncond':\n self.cond_emb = nn.Identity()\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 2 # two dummy tokens, one for masking, one for padding\n self.mask_id = opt.num_tokens\n self.pad_id = opt.num_tokens + 1\n\n self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n\n self.token_emb = nn.Embedding(_num_tokens, self.code_dim)\n\n self.apply(self.__init_weights)\n\n '''\n Preparing frozen weights\n '''\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n self.noise_schedule = cosine_schedule\n\n def load_and_freeze_token_emb(self, codebook):\n '''\n :param codebook: (c, d)\n :return:\n '''\n assert self.training, 'Only necessary in training mode'\n c, d = codebook.shape\n self.token_emb.weight = nn.Parameter(torch.cat([codebook, torch.zeros(size=(2, d), device=codebook.device)], dim=0)) #add two dummy tokens, 0 vectors\n self.token_emb.requires_grad_(False)\n # self.token_emb.weight.requires_grad = False\n # self.token_emb_ready = True\n print(\"Token embedding initialized!\")\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only 
unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def trans_forward(self, motion_ids, cond, padding_mask, force_mask=False):\n '''\n :param motion_ids: (b, seqlen)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :param force_mask: boolean\n :return:\n -logits: (b, num_token, seqlen)\n '''\n\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # print(motion_ids.shape)\n x = self.token_emb(motion_ids)\n # print(x.shape)\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(x)\n\n cond = self.cond_emb(cond).unsqueeze(0) #(1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, x], dim=0) #(seqlen+1, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:1]), padding_mask], dim=1) #(b, seqlen+1)\n # print(xseq.shape, padding_mask.shape)\n\n # print(padding_mask.shape, xseq.shape)\n\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[1:] #(seqlen, b, e)\n logits = self.output_process(output) #(seqlen, b, e) -> (b, ntoken, seqlen)\n return logits\n\n def forward(self, ids, y, m_lens):\n '''\n :param ids: (b, n)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n bs, ntokens = ids.shape\n device = ids.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) #(b, n)\n ids = torch.where(non_pad_mask, ids, self.pad_id)\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n\n '''\n Prepare mask\n '''\n rand_time = uniform((bs,), device=device)\n rand_mask_probs = self.noise_schedule(rand_time)\n num_token_masked = (ntokens * rand_mask_probs).round().clamp(min=1)\n\n batch_randperm = torch.rand((bs, ntokens), device=device).argsort(dim=-1)\n # Positions to be MASKED are ALL TRUE\n mask = batch_randperm < num_token_masked.unsqueeze(-1)\n\n # Positions to be MASKED must also be NON-PADDED\n mask &= non_pad_mask\n\n # Note this is our training target, not input\n labels = torch.where(mask, ids, self.mask_id)\n\n x_ids = ids.clone()\n\n # Further Apply Bert Masking Scheme\n # Step 1: 10% replace with an incorrect token\n mask_rid = get_mask_subset_prob(mask, 0.1)\n rand_id = torch.randint_like(x_ids, high=self.opt.num_tokens)\n x_ids = torch.where(mask_rid, rand_id, x_ids)\n # Step 2: 90% x 10% replace with correct token, and 90% x 88% replace with mask token\n mask_mid = get_mask_subset_prob(mask & ~mask_rid, 0.88)\n\n 
# mask_mid = mask\n\n x_ids = torch.where(mask_mid, self.mask_id, x_ids)\n\n logits = self.trans_forward(x_ids, cond_vector, ~non_pad_mask, force_mask)\n ce_loss, pred_id, acc = cal_performance(logits, labels, ignore_index=self.mask_id)\n\n return ce_loss, pred_id, acc\n\n def forward_with_cond_scale(self,\n motion_ids,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n # bs = motion_ids.shape[0]\n # if cond_scale == 1:\n if force_mask:\n return self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n logits = self.trans_forward(motion_ids, cond_vector, padding_mask)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n conds,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False\n ):\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n\n device = next(self.parameters()).device\n seq_len = max(m_lens)\n batch_size = len(m_lens)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, )\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, self.mask_id)\n scores = torch.where(padding_mask, 1e5, 0.)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * m_lens).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use 
multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n conds,\n tokens,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False,\n edit_mask=None,\n padding_mask=None,\n ):\n\n assert edit_mask.shape == tokens.shape if edit_mask is not None else True\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(1, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n if padding_mask == None:\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n if edit_mask == None:\n mask_free = True\n ids = torch.where(padding_mask, self.pad_id, tokens)\n edit_mask = torch.ones_like(padding_mask)\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n scores = torch.where(edit_mask, 0., 1e5)\n else:\n mask_free = False\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n ids = torch.where(edit_mask, self.mask_id, tokens)\n scores = torch.where(edit_mask, 0., 1e5)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = 0.16 if mask_free else self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * edit_len).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n # is_mask = (torch.rand_like(scores) < 0.8) * ~padding_mask if mask_free else is_mask\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if 
force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~edit_mask, 1e5) if mask_free else scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n @torch.no_grad()\n @eval_decorator\n def edit_beta(self,\n conds,\n conds_og,\n tokens,\n m_lens,\n cond_scale: int,\n force_mask=False,\n ):\n\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n if conds_og is not None:\n cond_vector_og = self.encode_text(conds_og)\n else:\n cond_vector_og = None\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n if conds_og is not None:\n cond_vector_og = self.enc_action(conds_og).to(device)\n else:\n cond_vector_og = None\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, tokens) # Do not mask anything\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids,\n cond_vector=cond_vector,\n cond_vector_neg=cond_vector_og,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n tokens[tokens == -1] = 0 # just to get through an error when index = -1 using gather\n og_tokens_scores = probs_without_temperature.gather(2, tokens.unsqueeze(dim=-1)) # (b, seqlen, 1)\n og_tokens_scores = og_tokens_scores.squeeze(-1) # (b, seqlen)\n\n return og_tokens_scores" }, { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = 
code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 
0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n 
return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being 
masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, 
perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = self.postprocess(x_decoder)\n return x_out" }, { "identifier": "EvalT2MOptions", "path": "options/eval_option.py", "snippet": "class EvalT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--which_epoch', type=str, default=\"latest\", help='Checkpoint you want to use, {latest, net_best_fid, etc}')\n self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size')\n\n self.parser.add_argument('--ext', type=str, default='text2motion', help='Extension of the result file or folder')\n self.parser.add_argument(\"--num_batch\", default=2, type=int,\n help=\"Number of batch for generation\")\n self.parser.add_argument(\"--repeat_times\", default=1, type=int,\n help=\"Number of repetitions, per sample text prompt\")\n self.parser.add_argument(\"--cond_scale\", default=4, type=float,\n help=\"For classifier-free sampling - specifies the s parameter, as defined in the paper.\")\n self.parser.add_argument(\"--temperature\", default=1., type=float,\n help=\"Sampling Temperature.\")\n self.parser.add_argument(\"--topkr\", default=0.9, type=float,\n help=\"Filter out percentil low prop entries.\")\n self.parser.add_argument(\"--time_steps\", default=18, type=int,\n help=\"Mask Generate steps.\")\n self.parser.add_argument(\"--seed\", default=10107, type=int)\n\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='True: gumbel sampling, False: categorical sampling.')\n self.parser.add_argument('--use_res_model', action=\"store_true\", help='Whether to use residual transformer.')\n # self.parser.add_argument('--est_length', action=\"store_true\", help='Training iterations')\n\n self.parser.add_argument('--res_name', type=str, default='tres_nlayer8_ld384_ff1024_rvq6ns_cdp0.2_sw', help='Model name of residual transformer')\n self.parser.add_argument('--text_path', type=str, default=\"\", help='Text prompt file')\n\n\n self.parser.add_argument('-msec', '--mask_edit_section', nargs='*', type=str, help='Indicate sections for editing, use comma to separate the start and end of a section'\n 'type int will specify the token frame, type float will specify the ratio of seq_len')\n self.parser.add_argument('--text_prompt', default='', type=str, help=\"A text prompt to be generated. If empty, will take text prompts from dataset.\")\n self.parser.add_argument('--source_motion', default='example_data/000612.npy', type=str, help=\"Source motion path for editing. 
(new_joint_vecs format .npy file)\")\n self.parser.add_argument(\"--motion_length\", default=0, type=int,\n help=\"Motion length for generation, only applicable with single text prompt.\")\n self.is_train = False" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "get_dataset_motion_loader", "path": "motion_loaders/dataset_motion_loader.py", "snippet": "def get_dataset_motion_loader(opt_path, batch_size, fname, device):\n opt = get_opt(opt_path, device)\n\n # Configurations of T2M dataset and KIT dataset is almost the same\n if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':\n print('Loading dataset %s ...' 
% opt.dataset_name)\n\n mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))\n std = np.load(pjoin(opt.meta_dir, 'std.npy'))\n\n w_vectorizer = WordVectorizer('./glove', 'our_vab')\n split_file = pjoin(opt.data_root, '%s.txt'%fname)\n dataset = Text2MotionDatasetEval(opt, mean, std, split_file, w_vectorizer)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,\n collate_fn=collate_fn, shuffle=True)\n else:\n raise KeyError('Dataset not Recognized !!')\n\n print('Ground Truth Dataset Loading Completed!!!')\n return dataloader, dataset" }, { "identifier": "EvaluatorModelWrapper", "path": "models/t2m_eval_wrapper.py", "snippet": "class EvaluatorModelWrapper(object):\n\n def __init__(self, opt):\n\n if opt.dataset_name == 't2m':\n opt.dim_pose = 263\n elif opt.dataset_name == 'kit':\n opt.dim_pose = 251\n else:\n raise KeyError('Dataset not Recognized!!!')\n\n opt.dim_word = 300\n opt.max_motion_length = 196\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.dim_motion_hidden = 1024\n opt.max_text_len = 20\n opt.dim_text_hidden = 512\n opt.dim_coemb_hidden = 512\n\n # print(opt)\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)\n self.opt = opt\n self.device = opt.device\n\n self.text_encoder.to(opt.device)\n self.motion_encoder.to(opt.device)\n self.movement_encoder.to(opt.device)\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not follow the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not follow the order of inputs\n def get_motion_embeddings(self, motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" } ]
import os
import torch
import utils.eval_t2m as eval_t2m
import numpy as np
from os.path import join as pjoin
from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer
from models.vq.model import RVQVAE
from options.eval_option import EvalT2MOptions
from utils.get_opt import get_opt
from motion_loaders.dataset_motion_loader import get_dataset_motion_loader
from models.t2m_eval_wrapper import EvaluatorModelWrapper
from utils.fixseed import fixseed
14808
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt') model_opt = get_opt(model_opt_path, device=opt.device) clip_version = 'ViT-B/32' vq_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'opt.txt') vq_opt = get_opt(vq_opt_path, device=opt.device) vq_model, vq_opt = load_vq_model(vq_opt) model_opt.num_tokens = vq_opt.nb_code model_opt.num_quantizers = vq_opt.num_quantizers model_opt.code_dim = vq_opt.code_dim res_opt_path = pjoin(opt.checkpoints_dir, 
opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt) assert res_opt.vq_name == model_opt.vq_name dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt' if opt.dataset_name == 'kit' \ else 'checkpoints/t2m/Comp_v6_KLD005/opt.txt' wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt') model_opt = get_opt(model_opt_path, device=opt.device) clip_version = 'ViT-B/32' vq_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'opt.txt') vq_opt = get_opt(vq_opt_path, device=opt.device) vq_model, vq_opt = load_vq_model(vq_opt) model_opt.num_tokens = vq_opt.nb_code model_opt.num_quantizers = vq_opt.num_quantizers model_opt.code_dim = vq_opt.code_dim res_opt_path = pjoin(opt.checkpoints_dir, 
opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt) assert res_opt.vq_name == model_opt.vq_name dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt' if opt.dataset_name == 'kit' \ else 'checkpoints/t2m/Comp_v6_KLD005/opt.txt' wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22
eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'test', device=opt.device)
5
2023-11-29 19:21:27+00:00
24k
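The `MaskTransformer` and `ResidualTransformer` snippets in the sample above share one masking convention: `lengths_to_mask` marks valid (non-padded) positions as True, and the encoders are fed `padding_mask = ~lengths_to_mask(m_lens, seq_len)`, i.e. padded positions are True. The `lengths_to_mask` body below is an assumed minimal implementation for illustration only; the repository imports its own version, which is not shown in this sample.

import torch

# Illustrative sketch of the padding-mask convention used by the transformers above.
# lengths_to_mask here is an assumption, not the repository's exact implementation.

def lengths_to_mask(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
    # (b,) lengths -> (b, max_len) boolean mask, True where position < length
    return torch.arange(max_len, device=lengths.device)[None, :] < lengths[:, None]

m_lens = torch.tensor([3, 5])                 # illustrative motion lengths
non_pad_mask = lengths_to_mask(m_lens, 6)     # True on valid tokens
padding_mask = ~non_pad_mask                  # True on padded tokens, as fed to the encoder
print(non_pad_mask)
print(padding_mask)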
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/models/autoencoder_tiny.py
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. 
Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "BaseOutput", "path": 
"llmga/diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "apply_forward_hook", "path": "llmga/diffusers/src/diffusers/utils/accelerate_utils.py", "snippet": "def apply_forward_hook(method):\n \"\"\"\n Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is useful\n for cases where a PyTorch module provides functions other than `forward` that should trigger a move to the\n appropriate acceleration device. This is the case for `encode` and `decode` in [`AutoencoderKL`].\n\n This decorator looks inside the internal `_hf_hook` property to find a registered offload hook.\n\n :param method: The method to decorate. 
This method should be a method of a PyTorch module.\n \"\"\"\n if not is_accelerate_available():\n return method\n accelerate_version = version.parse(accelerate.__version__).base_version\n if version.parse(accelerate_version) < version.parse(\"0.17.0\"):\n return method\n\n def wrapper(self, *args, **kwargs):\n if hasattr(self, \"_hf_hook\") and hasattr(self._hf_hook, \"pre_forward\"):\n self._hf_hook.pre_forward(self)\n return method(self, *args, **kwargs)\n\n return wrapper" }, { "identifier": "ModelMixin", "path": "llmga/diffusers/src/diffusers/models/modeling_utils.py", "snippet": "class ModelMixin(torch.nn.Module, PushToHubMixin):\n r\"\"\"\n Base class for all models.\n\n [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and\n saving models.\n\n - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`].\n \"\"\"\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _supports_gradient_checkpointing = False\n _keys_to_ignore_on_load_unexpected = None\n _hf_peft_config_loaded = False\n\n def __init__(self):\n super().__init__()\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite\n __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__':\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 
'unet.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False, stacklevel=3)\n return self._internal_dict[name]\n\n # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n return super().__getattr__(name)\n\n @property\n def is_gradient_checkpointing(self) -> bool:\n \"\"\"\n Whether gradient checkpointing is activated for this model or not.\n \"\"\"\n return any(hasattr(m, \"gradient_checkpointing\") and m.gradient_checkpointing for m in self.modules())\n\n def enable_gradient_checkpointing(self):\n \"\"\"\n Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if not self._supports_gradient_checkpointing:\n raise ValueError(f\"{self.__class__.__name__} does not support gradient checkpointing.\")\n self.apply(partial(self._set_gradient_checkpointing, value=True))\n\n def disable_gradient_checkpointing(self):\n \"\"\"\n Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if self._supports_gradient_checkpointing:\n self.apply(partial(self._set_gradient_checkpointing, value=False))\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n for module in self.children():\n if isinstance(module, torch.nn.Module):\n fn_recursive_set_mem_eff(module)\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):\n r\"\"\"\n Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up during\n inference. Speed up during training is not guaranteed.\n\n <Tip warning={true}>\n\n ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes\n precedent.\n\n </Tip>\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import UNet2DConditionModel\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> model = UNet2DConditionModel.from_pretrained(\n ... \"stabilityai/stable-diffusion-2-1\", subfolder=\"unet\", torch_dtype=torch.float16\n ... 
)\n >>> model = model.to(\"cuda\")\n >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def add_adapter(self, adapter_config, adapter_name: str = \"default\") -> None:\n r\"\"\"\n Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned\n to the adapter to follow the convention of the PEFT library.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT\n [documentation](https://huggingface.co/docs/peft).\n\n Args:\n adapter_config (`[~peft.PeftConfig]`):\n The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt\n methods.\n adapter_name (`str`, *optional*, defaults to `\"default\"`):\n The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n from peft import PeftConfig, inject_adapter_in_model\n\n if not self._hf_peft_config_loaded:\n self._hf_peft_config_loaded = True\n elif adapter_name in self.peft_config:\n raise ValueError(f\"Adapter with name {adapter_name} already exists. Please use a different name.\")\n\n if not isinstance(adapter_config, PeftConfig):\n raise ValueError(\n f\"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.\"\n )\n\n # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is\n # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here.\n adapter_config.base_model_name_or_path = None\n inject_adapter_in_model(adapter_config, self, adapter_name)\n self.set_adapter(adapter_name)\n\n def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:\n \"\"\"\n Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n\n Args:\n adapter_name (Union[str, List[str]])):\n The list of adapters to set or the adapter name in case of single adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n if isinstance(adapter_name, str):\n adapter_name = [adapter_name]\n\n missing = set(adapter_name) - set(self.peft_config)\n if len(missing) > 0:\n raise ValueError(\n f\"Following adapter(s) could not be found: {', '.join(missing)}. 
Make sure you are passing the correct adapter name(s).\"\n f\" current loaded adapters are: {list(self.peft_config.keys())}\"\n )\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n _adapters_has_been_set = False\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"set_adapter\"):\n module.set_adapter(adapter_name)\n # Previous versions of PEFT does not support multi-adapter inference\n elif not hasattr(module, \"set_adapter\") and len(adapter_name) != 1:\n raise ValueError(\n \"You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT.\"\n \" `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`\"\n )\n else:\n module.active_adapter = adapter_name\n _adapters_has_been_set = True\n\n if not _adapters_has_been_set:\n raise ValueError(\n \"Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters.\"\n )\n\n def disable_adapters(self) -> None:\n r\"\"\"\n Disable all adapters attached to the model and fallback to inference with the base model only.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=False)\n else:\n # support for older PEFT versions\n module.disable_adapters = True\n\n def enable_adapters(self) -> None:\n \"\"\"\n Enable adapters that are attached to the model. The model will use `self.active_adapters()` to retrieve the\n list of adapters to enable.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=True)\n else:\n # support for older PEFT versions\n module.disable_adapters = False\n\n def active_adapters(self) -> List[str]:\n \"\"\"\n Gets the current list of active adapters of the model.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. 
Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n return module.active_adapter\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n is_main_process: bool = True,\n save_function: Callable = None,\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory so that it can be reloaded using the\n [`~models.ModelMixin.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a model and its configuration file to. Will be created if it doesn't exist.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful during distributed training and you\n need to call this function on all processes. In this case, set `is_main_process=True` only on the main\n process to avoid race conditions.\n save_function (`Callable`):\n The function to use to save the state dictionary. Useful during distributed training when you need to\n replace `torch.save` with another method. Can be configured with the environment variable\n `DIFFUSERS_SAVE_MODE`.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n # Only save the model itself if we are using distributed training\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # Save the model\n state_dict = model_to_save.state_dict()\n\n weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME\n weights_name = _add_variant(weights_name, variant)\n\n # Save the model\n if safe_serialization:\n safetensors.torch.save_file(\n state_dict, os.path.join(save_directory, weights_name), metadata={\"format\": \"pt\"}\n )\n else:\n torch.save(state_dict, os.path.join(save_directory, weights_name))\n\n logger.info(f\"Model weights saved in {os.path.join(save_directory, weights_name)}\")\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: 
Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a pretrained PyTorch model from a pretrained model configuration.\n\n The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To\n train the model, set it back in training mode with `model.train()`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`~ModelMixin.save_pretrained`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model with another dtype. If `\"auto\"` is passed, the\n dtype is automatically derived from the model's weights.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info (`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n from_flax (`bool`, *optional*, defaults to `False`):\n Load the model weights from a Flax checkpoint save file.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you're downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn't need to be defined for each\n parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n Set `device_map=\"auto\"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary device identifier for the maximum memory. Will default to the maximum memory available for\n each GPU and the available CPU RAM if unset.\n offload_folder (`str` or `os.PathLike`, *optional*):\n The path to offload weights if `device_map` contains the value `\"disk\"`.\n offload_state_dict (`bool`, *optional*):\n If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if\n the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`\n when there is some disk offload.\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading only loading the pretrained weights and not initializing the weights. This also\n tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.\n Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this\n argument to `True` will raise an error.\n variant (`str`, *optional*):\n Load weights from a specified `variant` filename such as `\"fp16\"` or `\"ema\"`. This is ignored when\n loading `from_flax`.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the\n `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors`\n weights. If set to `False`, `safetensors` weights are not loaded.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n unet = UNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\", subfolder=\"unet\")\n ```\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```bash\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n ignore_mismatched_sizes = kwargs.pop(\"ignore_mismatched_sizes\", False)\n force_download = kwargs.pop(\"force_download\", False)\n from_flax = kwargs.pop(\"from_flax\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n device_map = kwargs.pop(\"device_map\", None)\n max_memory = kwargs.pop(\"max_memory\", None)\n offload_folder = kwargs.pop(\"offload_folder\", None)\n offload_state_dict = kwargs.pop(\"offload_state_dict\", False)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = True\n allow_pickle = True\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_accelerate_available():\n raise NotImplementedError(\n \"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set\"\n \" `device_map=None`. You can install accelerate with `pip install accelerate`.\"\n )\n\n # Check if we can handle device_map and dispatching the weights\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. 
Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # Load config if we don't provide a configuration\n config_path = pretrained_model_name_or_path\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"pytorch\",\n }\n\n # load config\n config, unused_kwargs, commit_hash = cls.load_config(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n return_commit_hash=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n device_map=device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n user_agent=user_agent,\n **kwargs,\n )\n\n # load model\n model_file = None\n if from_flax:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=FLAX_WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n model = cls.from_config(config, **unused_kwargs)\n\n # Convert the weights\n from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model\n\n model = load_flax_checkpoint_in_pytorch_model(model, model_file)\n else:\n if use_safetensors:\n try:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n except IOError as e:\n if not allow_pickle:\n raise e\n pass\n if model_file is None:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n\n if low_cpu_mem_usage:\n # Instantiate model with empty weights\n with accelerate.init_empty_weights():\n model = cls.from_config(config, **unused_kwargs)\n\n # if device_map is None, load the state dict and move the params from meta device to the cpu\n if device_map is None:\n param_device = \"cpu\"\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n # move the params from meta device to cpu\n missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())\n if len(missing_keys) > 0:\n raise ValueError(\n f\"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are\"\n f\" missing: \\n {', '.join(missing_keys)}. 
\\n Please make sure to pass\"\n \" `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize\"\n \" those weights or else make sure your checkpoint file is correct.\"\n )\n\n unexpected_keys = load_model_dict_into_meta(\n model,\n state_dict,\n device=param_device,\n dtype=torch_dtype,\n model_name_or_path=pretrained_model_name_or_path,\n )\n\n if cls._keys_to_ignore_on_load_unexpected is not None:\n for pat in cls._keys_to_ignore_on_load_unexpected:\n unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]\n\n if len(unexpected_keys) > 0:\n logger.warn(\n f\"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \\n {[', '.join(unexpected_keys)]}\"\n )\n\n else: # else let accelerate handle loading and dispatching.\n # Load weights and dispatch according to the device_map\n # by default the device_map is None and the weights are loaded on the CPU\n try:\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n except AttributeError as e:\n # When using accelerate loading, we do not have the ability to load the state\n # dict and rename the weight names manually. Additionally, accelerate skips\n # torch loading conventions and directly writes into `module.{_buffers, _parameters}`\n # (which look like they should be private variables?), so we can't use the standard hooks\n # to rename parameters on load. We need to mimic the original weight names so the correct\n # attributes are available. After we have loaded the weights, we convert the deprecated\n # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert\n # the weights so we don't have to do this again.\n\n if \"'Attention' object has no attribute\" in str(e):\n logger.warn(\n f\"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}\"\n \" was saved with deprecated attention block weight names. We will load it with the deprecated attention block\"\n \" names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,\"\n \" so we don't have to do the on the fly renaming in the future. 
If the model is from a hub checkpoint,\"\n \" please also re-upload it or open a PR on the original repository.\"\n )\n model._temp_convert_self_to_deprecated_attention_blocks()\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n model._undo_temp_convert_self_to_deprecated_attention_blocks()\n else:\n raise e\n\n loading_info = {\n \"missing_keys\": [],\n \"unexpected_keys\": [],\n \"mismatched_keys\": [],\n \"error_msgs\": [],\n }\n else:\n model = cls.from_config(config, **unused_kwargs)\n\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n\n model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(\n model,\n state_dict,\n model_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=ignore_mismatched_sizes,\n )\n\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"mismatched_keys\": mismatched_keys,\n \"error_msgs\": error_msgs,\n }\n\n if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):\n raise ValueError(\n f\"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}.\"\n )\n elif torch_dtype is not None:\n model = model.to(torch_dtype)\n\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n if output_loading_info:\n return model, loading_info\n\n return model\n\n @classmethod\n def _load_pretrained_model(\n cls,\n model,\n state_dict,\n resolved_archive_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=False,\n ):\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n loaded_keys = list(state_dict.keys())\n\n expected_keys = list(model_state_dict.keys())\n\n original_loaded_keys = loaded_keys\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = list(set(loaded_keys) - set(expected_keys))\n\n # Make sure we are able to load base models as well as derived models (with heads)\n model_to_load = model\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n model_key = checkpoint_key\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n return mismatched_keys\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used 
when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task\"\n \" or with another architecture (e.g. initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly\"\n \" identical (initializing a BertForSequenceClassification model from a\"\n \" BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the\"\n f\" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions\"\n \" without further training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be\"\n \" able to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs\n\n @property\n def device(self) -> device:\n \"\"\"\n `torch.device`: The device on which the module is (assuming that all the module parameters are on the same\n device).\n \"\"\"\n return get_parameter_device(self)\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).\n \"\"\"\n return get_parameter_dtype(self)\n\n def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:\n \"\"\"\n Get number of (trainable or non-embedding) parameters in the module.\n\n Args:\n only_trainable (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of trainable parameters.\n exclude_embeddings (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of non-embedding parameters.\n\n Returns:\n `int`: The number of parameters.\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n model_id = \"runwayml/stable-diffusion-v1-5\"\n unet = UNet2DConditionModel.from_pretrained(model_id, subfolder=\"unet\")\n unet.num_parameters(only_trainable=True)\n 859520964\n ```\n \"\"\"\n\n if exclude_embeddings:\n embedding_param_names = [\n f\"{name}.weight\"\n for name, module_type in self.named_modules()\n if isinstance(module_type, torch.nn.Embedding)\n ]\n non_embedding_parameters = [\n parameter for name, 
parameter in self.named_parameters() if name not in embedding_param_names\n ]\n return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)\n else:\n return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)\n\n def _convert_deprecated_attention_blocks(self, state_dict):\n deprecated_attention_block_paths = []\n\n def recursive_find_attn_block(name, module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_paths.append(name)\n\n for sub_name, sub_module in module.named_children():\n sub_name = sub_name if name == \"\" else f\"{name}.{sub_name}\"\n recursive_find_attn_block(sub_name, sub_module)\n\n recursive_find_attn_block(\"\", self)\n\n # NOTE: we have to check if the deprecated parameters are in the state dict\n # because it is possible we are loading from a state dict that was already\n # converted\n\n for path in deprecated_attention_block_paths:\n # group_norm path stays the same\n\n # query -> to_q\n if f\"{path}.query.weight\" in state_dict:\n state_dict[f\"{path}.to_q.weight\"] = state_dict.pop(f\"{path}.query.weight\")\n if f\"{path}.query.bias\" in state_dict:\n state_dict[f\"{path}.to_q.bias\"] = state_dict.pop(f\"{path}.query.bias\")\n\n # key -> to_k\n if f\"{path}.key.weight\" in state_dict:\n state_dict[f\"{path}.to_k.weight\"] = state_dict.pop(f\"{path}.key.weight\")\n if f\"{path}.key.bias\" in state_dict:\n state_dict[f\"{path}.to_k.bias\"] = state_dict.pop(f\"{path}.key.bias\")\n\n # value -> to_v\n if f\"{path}.value.weight\" in state_dict:\n state_dict[f\"{path}.to_v.weight\"] = state_dict.pop(f\"{path}.value.weight\")\n if f\"{path}.value.bias\" in state_dict:\n state_dict[f\"{path}.to_v.bias\"] = state_dict.pop(f\"{path}.value.bias\")\n\n # proj_attn -> to_out.0\n if f\"{path}.proj_attn.weight\" in state_dict:\n state_dict[f\"{path}.to_out.0.weight\"] = state_dict.pop(f\"{path}.proj_attn.weight\")\n if f\"{path}.proj_attn.bias\" in state_dict:\n state_dict[f\"{path}.to_out.0.bias\"] = state_dict.pop(f\"{path}.proj_attn.bias\")\n\n def _temp_convert_self_to_deprecated_attention_blocks(self):\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.query = module.to_q\n module.key = module.to_k\n module.value = module.to_v\n module.proj_attn = module.to_out[0]\n\n # We don't _have_ to delete the old attributes, but it's helpful to ensure\n # that _all_ the weights are loaded into the new attributes and we're not\n # making an incorrect assumption that this model should be converted when\n # it really shouldn't be.\n del module.to_q\n del module.to_k\n del module.to_v\n del module.to_out\n\n def _undo_temp_convert_self_to_deprecated_attention_blocks(self):\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.to_q = module.query\n 
module.to_k = module.key\n module.to_v = module.value\n module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])\n\n del module.query\n del module.key\n del module.value\n del module.proj_attn" }, { "identifier": "DecoderOutput", "path": "llmga/diffusers/src/diffusers/models/vae.py", "snippet": "class DecoderOutput(BaseOutput):\n \"\"\"\n Output of decoding method.\n\n Args:\n sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n The decoded output sample from the last layer of the model.\n \"\"\"\n\n sample: torch.FloatTensor" }, { "identifier": "DecoderTiny", "path": "llmga/diffusers/src/diffusers/models/vae.py", "snippet": "class DecoderTiny(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n num_blocks: int,\n block_out_channels: int,\n upsampling_scaling_factor: int,\n act_fn: str,\n ):\n super().__init__()\n\n layers = [\n nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1),\n get_activation(act_fn),\n ]\n\n for i, num_block in enumerate(num_blocks):\n is_final_block = i == (len(num_blocks) - 1)\n num_channels = block_out_channels[i]\n\n for _ in range(num_block):\n layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))\n\n if not is_final_block:\n layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor))\n\n conv_out_channel = num_channels if not is_final_block else out_channels\n layers.append(nn.Conv2d(num_channels, conv_out_channel, kernel_size=3, padding=1, bias=is_final_block))\n\n self.layers = nn.Sequential(*layers)\n self.gradient_checkpointing = False\n\n def forward(self, x):\n # Clamp.\n x = torch.tanh(x / 3) * 3\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)\n else:\n x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)\n\n else:\n x = self.layers(x)\n\n # scale image from [0, 1] to [-1, 1] to match diffusers convention\n return x.mul(2).sub(1)" }, { "identifier": "EncoderTiny", "path": "llmga/diffusers/src/diffusers/models/vae.py", "snippet": "class EncoderTiny(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n num_blocks: int,\n block_out_channels: int,\n act_fn: str,\n ):\n super().__init__()\n\n layers = []\n for i, num_block in enumerate(num_blocks):\n num_channels = block_out_channels[i]\n\n if i == 0:\n layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1))\n else:\n layers.append(nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1, stride=2, bias=False))\n\n for _ in range(num_block):\n layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))\n\n layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1))\n\n self.layers = nn.Sequential(*layers)\n self.gradient_checkpointing = False\n\n def forward(self, x):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)\n else:\n x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)\n\n else:\n # scale image from [-1, 1] 
to [0, 1] to match TAESD convention\n x = self.layers(x.add(1).div(2))\n\n return x" } ]
from dataclasses import dataclass
from typing import Tuple, Union

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.accelerate_utils import apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import DecoderOutput, DecoderTiny, EncoderTiny

import torch
19,857
# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class AutoencoderTinyOutput(BaseOutput):
    """
    Output of AutoencoderTiny encoding method.

    Args:
        latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
    """

    latents: torch.Tensor


class AutoencoderTiny(ModelMixin, ConfigMixin):
    r"""
    A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.

    [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each encoder block. The length of the
            tuple should be equal to the number of encoder blocks.
        decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each decoder block. The length of the
            tuple should be equal to the number of decoder blocks.
        act_fn (`str`, *optional*, defaults to `"relu"`):
            Activation function to be used throughout the model.
        latent_channels (`int`, *optional*, defaults to 4):
            Number of channels in the latent representation. The latent space acts as a compressed representation of
            the input image.
        upsampling_scaling_factor (`int`, *optional*, defaults to 2):
            Scaling factor for upsampling in the decoder. It determines the size of the output image during the
            upsampling process.
        num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
            Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
            length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
            number of encoder blocks.
        num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
            Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
            length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
            number of decoder blocks.
        latent_magnitude (`float`, *optional*, defaults to 3.0):
            Magnitude of the latent representation. This parameter scales the latent representation values to control
            the extent of information preservation.
        latent_shift (float, *optional*, defaults to 0.5):
            Shift applied to the latent representation. This parameter controls the center of the latent space.
        scaling_factor (`float`, *optional*, defaults to 1.0):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
            `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the
            [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
            For this Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default.
        force_upcast (`bool`, *optional*, default to `False`):
            If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` (see this fp16-friendly
            [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
    """

    _supports_gradient_checkpointing = True
# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class AutoencoderTinyOutput(BaseOutput):
    """
    Output of AutoencoderTiny encoding method.

    Args:
        latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
    """

    latents: torch.Tensor


class AutoencoderTiny(ModelMixin, ConfigMixin):
    r"""
    A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.

    [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each encoder block. The length of the
            tuple should be equal to the number of encoder blocks.
        decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each decoder block. The length of the
            tuple should be equal to the number of decoder blocks.
        act_fn (`str`, *optional*, defaults to `"relu"`):
            Activation function to be used throughout the model.
        latent_channels (`int`, *optional*, defaults to 4):
            Number of channels in the latent representation. The latent space acts as a compressed representation of
            the input image.
        upsampling_scaling_factor (`int`, *optional*, defaults to 2):
            Scaling factor for upsampling in the decoder. It determines the size of the output image during the
            upsampling process.
        num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
            Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
            length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
            number of encoder blocks.
        num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
            Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
            length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
            number of decoder blocks.
        latent_magnitude (`float`, *optional*, defaults to 3.0):
            Magnitude of the latent representation. This parameter scales the latent representation values to control
            the extent of information preservation.
        latent_shift (float, *optional*, defaults to 0.5):
            Shift applied to the latent representation. This parameter controls the center of the latent space.
        scaling_factor (`float`, *optional*, defaults to 1.0):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
            `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the
            [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
            For this Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default.
        force_upcast (`bool`, *optional*, default to `False`):
            If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` (see this fp16-friendly
            [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
    """

    _supports_gradient_checkpointing = True
@register_to_config
1
2023-11-27 18:46:55+00:00
24k
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_name))\n # * Get dataset\n if dataset_mode == \"ubcfashion\":\n dataset = UBCFasionDataset(\n data_root=\"./data/ubcfashion/\",\n video_list=[seq_name],\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"people_snapshot\":\n dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n print(\"Load Instant Avatar processed PeopleSnapshot\")\n elif dataset_mode == \"zju\":\n dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n elif dataset_mode == \"instant_avatar_wild\":\n # assert image_zoom_ratio == 1.0, \"Check! in the wild data should use 1.0\"\n if image_zoom_ratio != 1.0:\n logging.warning(\n f\"Check! in the wild data should use 1.0, but got {image_zoom_ratio}\"\n )\n dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"dog_demo\":\n dataset = DogDemoDataset(data_root=\"./data/dog_data_official/\", video_name=seq_name)\n else:\n raise NotImplementedError(\"Unknown mode: {}\".format(dataset_mode))\n\n # prepare an optimizable data provider\n optimizable_data_provider = RealDataOptimizablePoseProviderPose(\n dataset,\n balance=balance,\n )\n return optimizable_data_provider, dataset" }, { "identifier": "DatabasePoseProvider", "path": "lib_data/data_provider.py", "snippet": "class DatabasePoseProvider(nn.Module):\n def __init__(\n self,\n pose_dirs: list,\n da_pose_prob=0.1,\n da_range=[0.0, np.pi / 4],\n device=torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.device = device\n self.base_R = matrix_to_axis_angle(\n torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, \"sxyz\"))[None]\n )[0]\n self.base_R = self.base_R.float().to(self.device)\n\n self.da_pose_prob = da_pose_prob\n self.da_range = da_range\n\n self.data = []\n\n # cache the poses\n for d in pose_dirs:\n print(f\"Caching {d} ...\")\n for subject in tqdm(os.listdir(d)):\n sub_dir = os.path.join(d, subject)\n if not os.path.isdir(sub_dir):\n continue\n npz_files = [f for f in os.listdir(sub_dir) if f.endswith(\".npz\")]\n npz_files.sort()\n for fn in npz_files:\n try:\n npz_fn = os.path.join(sub_dir, fn)\n pose_data = np.load(npz_fn)\n amass_len = pose_data[\"poses\"].shape[0]\n smplx_to_smpl = list(range(66)) + [72, 73, 74, 117, 118, 119]\n poses = pose_data[\"poses\"][:, smplx_to_smpl].reshape(\n amass_len, 24, 3\n )\n self.data.append(poses.astype(np.float16))\n except:\n # print(f\"Error in {npz_fn}, skip!\")\n pass\n self.data = np.concatenate(self.data, axis=0)\n print(\n f\"Database has poses {len(self.data)} with DA-pose prob {self.da_pose_prob} and range {self.da_range}\"\n )\n return\n\n def forward(self, N: int):\n pose, trans = self.sample_pose(N)\n return pose, trans\n\n def sample_pose(self, N: int):\n # da pose\n pose_list = []\n for i in range(N):\n seed = np.random.rand()\n if seed > self.da_pose_prob:\n # from database\n idx = np.random.randint(len(self.data))\n pose = 
torch.from_numpy(self.data[idx]).float().to(self.device)\n else:\n # da pose\n pose = torch.zeros(24, 3).to(self.device)\n da_theta = float(np.random.uniform(*self.da_range))\n pose[1, -1] = da_theta\n pose[2, -1] = -da_theta\n pose[0] = self.base_R\n pose_list.append(pose)\n pose = torch.stack(pose_list, dim=0)\n trans = torch.zeros(N, 3).to(self.device)\n return pose, trans" }, { "identifier": "get_template", "path": "lib_gart/templates.py", "snippet": "def get_template(\n mode, init_beta, cano_pose_type, voxel_deformer_res, template_model_path=None\n):\n if mode == \"human\":\n template = SMPLTemplate(\n smpl_model_path=template_model_path,\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n elif mode == \"dog\":\n template = SMALTemplate(\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n else:\n raise ValueError(f\"Unknown mode {mode}\")\n return template" }, { "identifier": "GaussianTemplateModel", "path": "lib_gart/model.py", "snippet": "class GaussianTemplateModel(nn.Module):\n def __init__(\n self,\n template,\n add_bones: AdditionalBones,\n ##################################\n # attr config\n w_correction_flag=True,\n # w_rest_dim=0, # additional skinnign weight\n f_localcode_dim=0,\n max_sph_order=0,\n w_memory_type=\"point\",\n ##################################\n max_scale=0.1, # use sigmoid activation, can't be too large\n min_scale=0.0,\n # geo init\n init_mode=\"on_mesh\",\n opacity_init_value=0.9, # the init value of opacity\n # on mesh init params\n onmesh_init_subdivide_num=0,\n onmesh_init_scale_factor=1.0,\n onmesh_init_thickness_factor=0.5,\n # near mesh init params\n scale_init_value=0.01, # the init value of scale\n nearmesh_init_num=10000,\n nearmesh_init_std=0.1,\n ##################################\n ) -> None:\n super().__init__()\n\n self.template = template\n self.num_bones = template.voxel_deformer.num_bones\n self.add_bones = add_bones\n self.num_add_bones = add_bones.num_bones\n\n self.max_scale = max_scale\n self.min_scale = min_scale\n self._init_act(self.max_scale, self.min_scale)\n self.opacity_init_logit = self.o_inv_act(opacity_init_value)\n\n # * init geometry\n if init_mode == \"on_mesh\":\n x, q, s, o = get_on_mesh_init_geo_values(\n template,\n on_mesh_subdivide=onmesh_init_subdivide_num,\n scale_init_factor=onmesh_init_scale_factor,\n thickness_init_factor=onmesh_init_thickness_factor,\n max_scale=max_scale,\n min_scale=min_scale,\n s_inv_act=self.s_inv_act,\n opacity_init_logit=self.opacity_init_logit,\n )\n elif init_mode == \"near_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_near_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n random_init_std=nearmesh_init_std,\n )\n elif init_mode == \"in_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_inside_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n )\n else:\n raise NotImplementedError(f\"Unknown init_mode {init_mode}\")\n self._xyz = nn.Parameter(x)\n self._rotation = nn.Parameter(q)\n self._scaling = nn.Parameter(s)\n self._opacity = nn.Parameter(o)\n\n # * init attributes\n self.w_memory_type = w_memory_type\n assert self.w_memory_type in [\"point\", \"voxel\"], f\"Unknown {w_memory_type}\"\n\n 
self.max_sph_order = max_sph_order\n self.w_dc_dim = self.template.dim if w_correction_flag else 0\n self.w_rest_dim = self.add_bones.num_bones\n self.f_localcode_dim = f_localcode_dim\n\n sph_rest_dim = 3 * (sph_order2nfeat(self.max_sph_order) - 1)\n self._features_dc = nn.Parameter(torch.zeros_like(self._xyz))\n self._features_rest = nn.Parameter(torch.zeros(self.N, sph_rest_dim))\n\n # * Different implementation of smoothness\n if self.w_memory_type == \"point\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, self.w_dc_dim))\n self._w_correction_rest = nn.Parameter(\n torch.ones(self.N, self.w_rest_dim) * 1e-4\n )\n elif self.w_memory_type == \"voxel\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, 0))\n self._w_correction_rest = nn.Parameter(torch.zeros(self.N, 0))\n if self.w_dc_dim > 0:\n self.template.voxel_deformer.enable_voxel_correction()\n if self.w_rest_dim > 0:\n self.template.voxel_deformer.enable_additional_correction(\n self.w_rest_dim\n )\n elif self.w_memory_type == \"hash\":\n raise NotImplementedError(\"TODO\")\n else:\n raise NotImplementedError(f\"Unknown {w_memory_type}\")\n\n self._features_localcode = nn.Parameter(\n torch.zeros(self.N, self.f_localcode_dim)\n )\n\n assert self.f_localcode_dim == 0, \"TODO, add local mlp ablation\"\n\n # * States\n # warning, our code use N, instead of (N,1) as in GS code\n self.register_buffer(\"xyz_gradient_accum\", torch.zeros(self.N).float())\n self.register_buffer(\"xyz_gradient_denom\", torch.zeros(self.N).long())\n self.register_buffer(\"max_radii2D\", torch.zeros(self.N).float())\n\n self.op_update_exclude = [\"add_bones\"]\n if self.w_memory_type != \"point\":\n self.op_update_exclude.extend([\"w_dc_vox\", \"w_rest_vox\"])\n # self.summary()\n return\n\n def summary(self):\n # logging.info number of parameters per pytorch sub module\n msg = \"\"\n for name, param in self.named_parameters():\n if name.startswith(\"add_bones\"):\n continue # compact print\n msg = msg + f\"[{name}:{param.numel()/1e3:.1f}K] \" \n # logging.info(f\"{name}, {param.numel()/1e6:.3f}M\")\n logging.info(msg)\n return\n\n def _init_act(self, max_s_value, min_s_value):\n def s_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return min_s_value + torch.sigmoid(x) * (max_s_value - min_s_value)\n\n def s_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n y = (x - min_s_value) / (max_s_value - min_s_value) + 1e-5\n y = torch.logit(y)\n assert not torch.isnan(\n y\n ).any(), f\"{x.min()}, {x.max()}, {y.min()}, {y.max()}\"\n return y\n\n def o_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.sigmoid(x)\n\n def o_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.logit(x)\n\n self.s_act = s_act\n self.s_inv_act = s_inv_act\n self.o_act = o_act\n self.o_inv_act = o_inv_act\n\n return\n\n @property\n def N(self):\n return len(self._xyz)\n\n @property\n def get_x(self):\n return self._xyz\n\n @property\n def get_R(self):\n return quaternion_to_matrix(self._rotation)\n\n @property\n def get_o(self):\n return self.o_act(self._opacity)\n\n @property\n def get_s(self):\n return self.s_act(self._scaling)\n\n @property\n def get_c(self):\n return torch.cat([self._features_dc, self._features_rest], dim=-1)\n\n def cache_for_fast(self):\n _cached_W, _ = self.template.forward(None, self._xyz)\n self._cached_W = _cached_W.detach().clone()\n return\n\n def forward(\n self, theta, trans, additional_dict={}, active_sph_order=None, 
fast=False\n ):\n # * fast will use the cached per point attr, no query anymore\n # TODO: the additional dict contain info to do flexible skinning: it can contain the As directly for optimization, or it can contain t index to query some buffers to provide As, or it can contain t along with the input theta to query some MLP;\n\n # TODO: if use vol memory, every forward update self.xxx, and remove them from parameters, pretend that the attributes are per point, but actually they are queried every forward\n\n # theta: B,24,3; trans: B,3\n B = len(theta)\n if active_sph_order is None:\n active_sph_order = self.max_sph_order\n else:\n assert (\n active_sph_order <= self.max_sph_order\n ), \"active_sph_order should be smaller\"\n sph_dim = 3 * sph_order2nfeat(active_sph_order)\n\n xyz = self.get_x\n mu_can = xyz\n frame_can = self.get_R\n s = self.get_s\n o = self.get_o\n sph = self.get_c[:, :sph_dim]\n\n mu_can = mu_can[None].expand(B, -1, -1)\n frame_can = frame_can[None].expand(B, -1, -1, -1)\n\n if fast:\n # only forward skeleton, no query voxel\n _, A = self.template.forward(theta, None)\n W = self._cached_W[None].expand(B, -1, -1)\n else:\n W, A = self.template.forward(theta, mu_can)\n if self._w_correction_dc.shape[-1] > 0:\n W = W + self._w_correction_dc[None]\n T = torch.einsum(\"bnj, bjrc -> bnrc\", W[..., : self.num_bones], A)\n\n # * additional correction here\n if \"pose\" not in additional_dict.keys():\n # maybe later we want to viz the different pose effect in cano\n additional_dict[\"pose\"] = theta.reshape(B, -1)[:, 3:]\n add_A = self.add_bones(**additional_dict)\n if add_A is not None:\n if theta.ndim == 2:\n global_axis_angle = theta[:, :3]\n else:\n global_axis_angle = theta[:, 0]\n global_orient_action = self.template.get_rot_action(global_axis_angle) # B,4,4\n add_A = torch.einsum(\"bij, bnjk -> bnik\", global_orient_action, add_A)\n\n if self.w_memory_type == \"point\":\n assert self._w_correction_rest.shape[-1] > 0\n add_W = self._w_correction_rest[None].expand(B, -1, -1)\n elif self.w_memory_type == \"voxel\":\n add_W = W[..., self.num_bones :]\n\n add_T = torch.einsum(\"bnj, bjrc -> bnrc\", add_W, add_A)\n T = T + add_T # Linear\n additional_dict[\"As\"] = add_A\n\n R, t = T[:, :, :3, :3], T[:, :, :3, 3] # B,N,3,3; B,N,3\n\n mu = torch.einsum(\"bnij,bnj->bni\", R, mu_can) + t # B,N,3\n frame = torch.einsum(\"bnij,bnjk->bnik\", R, frame_can) # B,N,3,3\n\n s = s[None].expand(B, -1, -1) # B,N,1\n o = o[None].expand(B, -1, -1) # B,N,1\n sph = sph[:, :sph_dim][None].expand(B, -1, -1) # B,N,C\n\n mu = mu + trans[:, None, :]\n\n return mu, frame, s, o, sph, additional_dict\n\n def compute_reg(self, K):\n # !can cancel the knn, but the w reg is critical\n if K > 0:\n xyz = self._xyz\n # todo: this can be cached and updated every several steps!!\n dist_sq, nn_ind, _ = knn_points(xyz[None], xyz[None], K=K, return_nn=False)\n nn_ind = nn_ind.squeeze(0)\n # reg the std inside knn\n q = self._rotation[nn_ind, :] # N,K,4\n s = self.get_s[nn_ind, :] # N,K,3\n o = self.get_o[nn_ind, :] # N,K,1\n q_std = q.std(dim=1).mean()\n s_std = s.std(dim=1).mean()\n o_std = o.std(dim=1).mean()\n\n cd = self._features_dc[nn_ind, :] # N,K,3\n ch = self._features_rest[nn_ind, :] # N,K,C\n cd_std = cd.std(dim=1).mean()\n ch_std = ch.std(dim=1).mean()\n if ch.shape[-1] == 0:\n ch_std = torch.zeros_like(ch_std)\n\n w = self._w_correction_dc[nn_ind, :] # N,K,3\n w_rest = self._w_correction_rest[nn_ind, :] # N,K,C\n f = self._features_localcode[nn_ind, :] # N,K,C\n w_std = w.std(dim=1).mean()\n w_rest_std 
= w_rest.std(dim=1).mean()\n f_std = f.std(dim=1).mean()\n if w.shape[-1] == 0:\n w_std = torch.zeros_like(cd_std)\n if w_rest.shape[-1] == 0:\n w_rest_std = torch.zeros_like(cd_std)\n if f.shape[-1] == 0:\n f_std = torch.zeros_like(cd_std)\n else:\n dummy = torch.zeros(1).to(self._xyz).squeeze()\n q_std, s_std, o_std = dummy, dummy, dummy\n cd_std, ch_std = dummy, dummy\n w_std, w_rest_std, f_std = dummy, dummy, dummy\n dist_sq = dummy\n\n w_norm = self._w_correction_dc.norm(dim=-1).mean() # N\n w_rest_norm = self._w_correction_rest.norm(dim=-1).mean() # N\n\n if self.w_memory_type == \"voxel\":\n # update the w related std and norm\n w_std = self.template.voxel_deformer.get_tv(\"dc\")\n w_rest_std = self.template.voxel_deformer.get_tv(\"rest\")\n w_norm = self.template.voxel_deformer.get_mag(\"dc\")\n w_rest_norm = self.template.voxel_deformer.get_mag(\"rest\")\n\n max_s_square = torch.mean((self.get_s.max(dim=1).values) ** 2)\n\n return (\n q_std,\n s_std,\n o_std,\n cd_std,\n ch_std,\n w_std,\n w_rest_std,\n f_std,\n w_norm,\n w_rest_norm,\n dist_sq.mean(),\n max_s_square,\n )\n\n def get_optimizable_list(\n self,\n lr_p=0.00016,\n lr_q=0.001,\n lr_s=0.005,\n lr_o=0.05,\n lr_sph=0.0025,\n lr_sph_rest=None,\n lr_w=0.001,\n lr_w_rest=0.001,\n lr_f=0.0001,\n ):\n lr_sph_rest = lr_sph / 20 if lr_sph_rest is None else lr_sph_rest\n l = [\n {\"params\": [self._xyz], \"lr\": lr_p, \"name\": \"xyz\"},\n {\"params\": [self._opacity], \"lr\": lr_o, \"name\": \"opacity\"},\n {\"params\": [self._scaling], \"lr\": lr_s, \"name\": \"scaling\"},\n {\"params\": [self._rotation], \"lr\": lr_q, \"name\": \"rotation\"},\n {\"params\": [self._features_dc], \"lr\": lr_sph, \"name\": \"f_dc\"},\n {\"params\": [self._features_rest], \"lr\": lr_sph_rest, \"name\": \"f_rest\"},\n {\"params\": [self._w_correction_dc], \"lr\": lr_w, \"name\": \"w_dc\"},\n {\"params\": [self._w_correction_rest], \"lr\": lr_w_rest, \"name\": \"w_rest\"},\n {\"params\": [self._features_localcode], \"lr\": lr_f, \"name\": \"f_localcode\"},\n ]\n if self.w_memory_type == \"voxel\":\n if self.w_dc_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.voxel_w_correction],\n \"lr\": lr_w,\n \"name\": \"w_dc_vox\",\n }\n )\n if self.w_rest_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.additional_correction],\n \"lr\": lr_w_rest,\n \"name\": \"w_rest_vox\",\n }\n )\n return l\n\n # * Gaussian Control\n def record_xyz_grad_radii(self, viewspace_point_tensor, radii, update_filter):\n # Record the gradient norm, invariant across different poses\n assert len(viewspace_point_tensor) == self.N\n self.xyz_gradient_accum[update_filter] += torch.norm(\n viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=False\n )\n self.xyz_gradient_denom[update_filter] += 1\n self.max_radii2D[update_filter] = torch.max(\n self.max_radii2D[update_filter], radii[update_filter]\n )\n return\n\n def _densification_postprocess(\n self,\n optimizer,\n new_xyz,\n new_r,\n new_s,\n new_o,\n new_sph_dc,\n new_sph_rest,\n new_w_dc,\n new_w_rest,\n new_localcode,\n ):\n d = {\n \"xyz\": new_xyz,\n \"f_dc\": new_sph_dc,\n \"f_rest\": new_sph_rest,\n \"opacity\": new_o,\n \"scaling\": new_s,\n \"rotation\": new_r,\n \"w_dc\": new_w_dc,\n \"w_rest\": new_w_rest,\n \"f_localcode\": new_localcode,\n }\n d = {k: v for k, v in d.items() if v is not None}\n\n # First cat to optimizer and then return to self\n optimizable_tensors = cat_tensors_to_optimizer(optimizer, d)\n\n self._xyz = optimizable_tensors[\"xyz\"]\n self._opacity = 
optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n self._rotation = optimizable_tensors[\"rotation\"]\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.xyz_gradient_denom = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.max_radii2D = torch.cat(\n [self.max_radii2D, torch.zeros_like(new_xyz[:, 0])], dim=0\n )\n return\n\n def _densify_and_clone(self, optimizer, grad_norm, grad_threshold, scale_th):\n # Extract points that satisfy the gradient condition\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(self.get_s, dim=1).values <= scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n new_xyz = self._xyz[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n new_scaling = self._scaling[selected_pts_mask]\n new_opacities = self._opacity[selected_pts_mask]\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n return len(new_xyz)\n\n def _densify_and_split(\n self,\n optimizer,\n grad_norm,\n grad_threshold,\n scale_th,\n N=2,\n ):\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(_scaling, dim=1).values > scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n stds = _scaling[selected_pts_mask].repeat(N, 1)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = quaternion_to_matrix(self._rotation[selected_pts_mask]).repeat(N, 1, 1)\n new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self._xyz[\n selected_pts_mask\n ].repeat(N, 1)\n new_scaling = _scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)\n new_scaling = torch.clamp(new_scaling, max=self.max_scale, min=self.min_scale)\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(N, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(N, 1)\n new_w_rest = 
self._w_correction_rest[selected_pts_mask].repeat(N, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(N, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(N * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n return len(new_xyz)\n\n def densify(self, optimizer, max_grad, percent_dense, extent, verbose=True):\n grads = self.xyz_gradient_accum / self.xyz_gradient_denom\n grads[grads.isnan()] = 0.0\n\n # n_clone = self._densify_and_clone(optimizer, grads, max_grad)\n n_clone = self._densify_and_clone(\n optimizer, grads, max_grad, percent_dense * extent\n )\n n_split = self._densify_and_split(\n optimizer, grads, max_grad, percent_dense * extent, N=2\n )\n\n if verbose:\n logging.info(f\"Densify: Clone[+] {n_clone}, Split[+] {n_split}\")\n # logging.info(f\"Densify: Clone[+] {n_clone}\")\n # torch.cuda.empty_cache()\n return\n\n def random_grow(self, optimizer, num_factor=0.05, std=0.1, init_opa_value=0.1):\n # * New operation, randomly add largely disturbed points to the geometry\n ind = torch.randperm(self.N)[: int(self.N * num_factor)]\n selected_pts_mask = torch.zeros(self.N, dtype=bool, device=\"cuda\")\n selected_pts_mask[ind] = True\n\n new_xyz = self._xyz[selected_pts_mask]\n noise = torch.randn_like(new_xyz) * std\n new_xyz = new_xyz + noise\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n\n new_opacities = torch.ones_like(self._opacity[selected_pts_mask])\n new_opacities = new_opacities * self.o_inv_act(init_opa_value)\n\n new_scaling = self._scaling[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n logging.info(f\"Random grow: {len(new_xyz)}\")\n return len(new_xyz)\n\n def prune_points(self, optimizer, min_opacity, max_screen_size, verbose=True):\n opacity = self.o_act(self._opacity)\n prune_mask = (opacity < min_opacity).squeeze()\n if max_screen_size: # if a point is too large\n big_points_vs = self.max_radii2D > max_screen_size\n prune_mask = torch.logical_or(prune_mask, big_points_vs)\n # * reset the maxRadii\n self.max_radii2D = torch.zeros_like(self.max_radii2D)\n self._prune_points(optimizer, prune_mask)\n if verbose:\n logging.info(f\"Prune: {prune_mask.sum()}\")\n\n def _prune_points(self, optimizer, mask):\n valid_points_mask = ~mask\n optimizable_tensors = prune_optimizer(\n optimizer,\n valid_points_mask,\n exclude_names=self.op_update_exclude,\n )\n\n self._xyz = optimizable_tensors[\"xyz\"]\n if getattr(self, \"color_memory\", None) is None:\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._opacity = optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n 
self._rotation = optimizable_tensors[\"rotation\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]\n self.xyz_gradient_denom = self.xyz_gradient_denom[valid_points_mask]\n self.max_radii2D = self.max_radii2D[valid_points_mask]\n # torch.cuda.empty_cache()\n return\n\n @torch.no_grad()\n def regaussian(self, optimizer, max_scale=0.03):\n # raise NotImplementedError(\"TODO, like split\")\n # * New operation, manually split the large gaussians with smaller ones to approximate\n # * Now, try bi-split\n\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n\n step = 0\n before_num = self.N\n while selected_pts_mask.any():\n # This can be done more than 3 times, becuase there may be huge gaussians, which should be devided several times\n fg_xyz = self._xyz[selected_pts_mask]\n fg_scale = _scaling[selected_pts_mask]\n fg_frame = quaternion_to_matrix(self._rotation[selected_pts_mask])\n # each column is the direction of axis in global frame\n axis_ind = torch.argmax(fg_scale, dim=1)\n axis_scale = fg_scale.max(dim=1).values\n # select column\n axis_dir = torch.gather(\n fg_frame, dim=2, index=axis_ind[:, None, None].expand(-1, 3, -1)\n ).squeeze(\n -1\n ) # N,3\n new_x1 = fg_xyz + axis_dir.squeeze() * axis_scale[:, None] / 2.0\n new_x2 = fg_xyz - axis_dir.squeeze() * axis_scale[:, None] / 2.0\n # Repeat will change [1,2,3...] to [1,2,3..., 1,2,3...]\n new_xyz = torch.cat([new_x1, new_x2], dim=0).reshape(-1, 3)\n new_scaling = _scaling[selected_pts_mask]\n new_scaling = torch.scatter(\n new_scaling,\n dim=1,\n index=axis_ind[:, None],\n src=axis_scale[:, None] / 2.0,\n ).repeat(2, 1)\n new_scaling = torch.clamp(\n new_scaling, max=self.max_scale, min=self.min_scale\n )\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(2, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(2, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(2, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(2, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(2, 1)\n new_w_rest = self._w_correction_rest[selected_pts_mask].repeat(2, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(2, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz.float(),\n new_r=new_rotation.float(),\n new_s=new_scaling.float(),\n new_o=new_opacities.float(),\n new_sph_dc=new_features_dc.float(),\n new_sph_rest=new_features_rest.float(),\n new_w_dc=new_w_dc.float(),\n new_w_rest=new_w_rest.float(),\n new_localcode=new_localcode.float(),\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(2 * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n\n step += 1\n logging.info(\n f\"Regaussian-[{step}], {selected_pts_mask.sum()} ({selected_pts_mask.float().mean()*100}% pts-scale>{max_scale})\"\n )\n\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n logging.info(f\"Re-gaussian: {before_num} -> {self.N}\")\n return\n\n def reset_opacity(self, optimizer, value=0.01, verbose=True):\n opacities_new = self.o_inv_act(\n torch.min(self.o_act(self._opacity), torch.ones_like(self._opacity) * value)\n )\n 
optimizable_tensors = replace_tensor_to_optimizer(\n optimizer, opacities_new, \"opacity\"\n )\n if verbose:\n logging.info(f\"Reset opacity to {value}\")\n self._opacity = optimizable_tensors[\"opacity\"]\n\n def load(self, ckpt):\n # because N changed, have to re-init the buffers\n self._xyz = nn.Parameter(torch.as_tensor(ckpt[\"_xyz\"], dtype=torch.float32))\n\n self._features_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_dc\"], dtype=torch.float32)\n )\n self._features_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_rest\"], dtype=torch.float32)\n )\n self._opacity = nn.Parameter(\n torch.as_tensor(ckpt[\"_opacity\"], dtype=torch.float32)\n )\n self._scaling = nn.Parameter(\n torch.as_tensor(ckpt[\"_scaling\"], dtype=torch.float32)\n )\n self._rotation = nn.Parameter(\n torch.as_tensor(ckpt[\"_rotation\"], dtype=torch.float32)\n )\n self._w_correction_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_dc\"], dtype=torch.float32)\n )\n self._w_correction_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_rest\"], dtype=torch.float32)\n )\n self._features_localcode = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_localcode\"], dtype=torch.float32)\n )\n self.xyz_gradient_accum = torch.as_tensor(\n ckpt[\"xyz_gradient_accum\"], dtype=torch.float32\n )\n self.xyz_gradient_denom = torch.as_tensor(\n ckpt[\"xyz_gradient_denom\"], dtype=torch.int64\n )\n self.max_radii2D = torch.as_tensor(ckpt[\"max_radii2D\"], dtype=torch.float32)\n\n # * add bones may have different total_t\n if \"add_bones.dt_list\" in ckpt.keys():\n self.add_bones.total_t = ckpt[\"add_bones.dt_list\"].shape[0]\n self.add_bones.dt_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dt_list\"], dtype=torch.float32)\n )\n self.add_bones.dr_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dr_list\"], dtype=torch.float32)\n )\n # load others\n self.load_state_dict(ckpt, strict=True)\n # this is critical, reinit the funcs\n self._init_act(self.max_scale, self.min_scale)\n return" }, { "identifier": "AdditionalBones", "path": "lib_gart/model.py", "snippet": "class AdditionalBones(nn.Module):\n def __init__(\n self, # additional bones\n num_bones: int = 0,\n total_t: int = 0, # any usage of time should use this!\n mode=\"pose-mlp\",\n # pose-mlp\n pose_dim=23 * 3,\n mlp_hidden_dims=[256, 256, 256, 256],\n mlp_act=nn.LeakyReLU,\n # pose+t-mlp\n ):\n super().__init__()\n self.num_bones = num_bones\n if self.num_bones == 0:\n return\n self.mode = mode\n assert self.mode in [\"pose-mlp\", \"pose+t-mlp\", \"delta-list\", \"list\"]\n self.total_t = total_t\n\n if self.mode == \"pose-mlp\":\n self.pose_dim = pose_dim\n self.mlp_layers = nn.ModuleList()\n c_in = self.pose_dim\n for c_out in mlp_hidden_dims:\n self.mlp_layers.append(nn.Sequential(nn.Linear(c_in, c_out), mlp_act()))\n c_in = c_out\n self.mlp_output_head = nn.Linear(c_in, 7 * self.num_bones, bias=False)\n with torch.no_grad():\n self.mlp_output_head.weight.data = (\n torch.randn_like(self.mlp_output_head.weight.data) * 1e-3\n )\n elif self.mode == \"delta-list\":\n self.dr_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n self.dt_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n else:\n raise NotImplementedError()\n\n return\n\n def forward(self, pose=None, t=None, As=None):\n if self.num_bones == 0:\n # * No additional bones\n return None\n if As is not None:\n # * Directly return if As already provided\n return As\n if self.mode == \"pose-mlp\":\n assert pose is not None\n assert pose.ndim == 2 and 
pose.shape[1] == self.pose_dim\n B = len(pose)\n x = pose\n for layer in self.mlp_layers:\n x = layer(x)\n x = self.mlp_output_head(x).reshape(B, -1, 7)\n q, t = x[:, :, :4], x[:, :, 4:]\n q[..., 0] = q[..., 0] + 1.0\n q = F.normalize(q, dim=-1)\n R = quaternion_to_matrix(q)\n Rt = torch.cat([R, t[:, :, :, None]], dim=-1)\n bottom = torch.zeros_like(Rt[:, :, 0:1])\n bottom[:, :, :, -1] = 1.0\n As = torch.cat([Rt, bottom], dim=2)\n return As\n elif self.mode == \"delta-list\":\n As = self._roll_out_continuous_T()\n if t is None:\n B = len(pose)\n # # ! If no time is set, now return eye(4)\n # ret = (\n # torch.eye(4)\n # .to(As.device)[None, None]\n # .repeat(B, self.num_bones, 1, 1)\n # )\n # ! If no time is set, now return first frame\n ret = As[0][None].repeat(B, 1, 1, 1)\n else:\n if isinstance(t, int):\n t = torch.tensor([t]).to(As.device)\n ret = As[t]\n return ret\n else:\n raise NotImplementedError()\n\n return # As in canonical frame\n\n def _roll_out_continuous_T(self):\n # ! this assumes continuous frames, single frame!\n R = axis_angle_to_matrix(self.dr_list)\n dT = (\n torch.eye(4).to(R.device)[None, None].repeat(self.total_t, R.shape[1], 1, 1)\n )\n dT[:, :, :3, :3] = dT[:, :, :3, :3] * 0 + R\n dT[:, :, :3, 3] = dT[:, :, :3, 3] * 0 + self.dt_list\n T = [dT[0]]\n for i in range(1, self.total_t):\n T.append(torch.einsum(\"nij, njk->nik\", T[-1], dT[i]))\n T = torch.stack(T, dim=0)\n return T" }, { "identifier": "render_cam_pcl", "path": "lib_render/gauspl_renderer.py", "snippet": "def render_cam_pcl(\n xyz,\n frame,\n scale,\n opacity,\n color_feat,\n H,\n W,\n CAM_K,\n verbose=False,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n # ! Camera is at origin, every input is in camera coordinate space\n\n S = torch.zeros_like(frame)\n S[:, 0, 0] = scale[:, 0]\n S[:, 1, 1] = scale[:, 1]\n S[:, 2, 2] = scale[:, 2]\n actual_covariance = frame @ (S**2) @ frame.permute(0, 2, 1)\n\n # Create zero tensor. 
We will use it to make pytorch return gradients of the 2D (screen-space) means\n device = xyz.device\n screenspace_points = (\n torch.zeros_like(xyz, dtype=xyz.dtype, requires_grad=True, device=xyz.device) + 0\n )\n # screenspace_points.retain_grad()\n try:\n screenspace_points.retain_grad()\n except:\n pass\n\n # * Specially handle the non-centered camera, using first padding and finally crop\n if abs(H // 2 - CAM_K[1, 2]) > 1.0 or abs(W // 2 - CAM_K[0, 2]) > 1.0:\n center_handling_flag = True\n left_w, right_w = CAM_K[0, 2], W - CAM_K[0, 2]\n top_h, bottom_h = CAM_K[1, 2], H - CAM_K[1, 2]\n new_W = int(2 * max(left_w, right_w))\n new_H = int(2 * max(top_h, bottom_h))\n else:\n center_handling_flag = False\n new_W, new_H = W, H\n\n # Set up rasterization configuration\n FoVx = focal2fov(CAM_K[0, 0], new_W)\n FoVy = focal2fov(CAM_K[1, 1], new_H)\n tanfovx = math.tan(FoVx * 0.5)\n tanfovy = math.tan(FoVy * 0.5)\n\n # TODO: Check dynamic gaussian repos and original gaussian repo, they use projection matrix to handle non-centered K, not using this stupid padding like me\n viewmatrix = torch.from_numpy(getWorld2View2(np.eye(3), np.zeros(3)).transpose(0, 1)).to(device)\n projection_matrix = (\n getProjectionMatrix(znear=0.01, zfar=1.0, fovX=FoVx, fovY=FoVy).transpose(0, 1).to(device)\n )\n full_proj_transform = (viewmatrix.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)\n camera_center = viewmatrix.inverse()[3, :3]\n\n raster_settings = GaussianRasterizationSettings(\n image_height=new_H,\n image_width=new_W,\n tanfovx=tanfovx,\n tanfovy=tanfovy,\n bg=torch.tensor(bg_color, dtype=torch.float32, device=device),\n scale_modifier=1.0,\n viewmatrix=viewmatrix,\n projmatrix=full_proj_transform,\n sh_degree=0, # ! use pre-compute color!\n campos=camera_center,\n prefiltered=False,\n debug=False,\n )\n rasterizer = GaussianRasterizer(raster_settings=raster_settings)\n\n means3D = xyz\n means2D = screenspace_points\n # opacity = torch.ones_like(means3D[:, 0]) * sigma\n\n # If precomputed 3d covariance is provided, use it. If not, then it will be computed from\n # scaling / rotation by the rasterizer.\n scales = None\n rotations = None\n # JH\n cov3D_precomp = strip_lowerdiag(actual_covariance)\n\n # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors\n # from SHs in Python, do it. 
If not, then SH -> RGB conversion will be done by rasterizer.\n # xyz are in camera frame, so the dir in camera frame is just their normalized direction\n dir_cam = F.normalize(xyz, dim=-1)\n # P_w = Frame @ P_local\n dir_local = torch.einsum(\"nji,nj->ni\", frame, dir_cam) # note the transpose\n dir_local = F.normalize(\n dir_local, dim=-1\n ) # If frame is not SO(3) but Affinity, have to normalize\n N = len(color_feat)\n shs_view = color_feat.reshape(N, -1, 3) # N, Deg, Channels\n sh2rgb = eval_sh(active_sph_order, shs_view.permute(0, 2, 1), dir_local)\n colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)\n # colors_precomp = color_feat\n\n # Rasterize visible Gaussians to image, obtain their radii (on screen).\n\n start_time = time.time()\n ret = rasterizer(\n means3D=means3D.float(),\n means2D=means2D.float(),\n shs=None,\n colors_precomp=colors_precomp.float(),\n opacities=opacity.float(),\n scales=scales,\n rotations=rotations,\n cov3D_precomp=cov3D_precomp.float(),\n )\n if len(ret) == 2:\n rendered_image, radii = ret\n depth, alpha = None, None\n elif len(ret) == 4:\n rendered_image, radii, depth, alpha = ret\n else:\n raise ValueError(f\"Unexpected return value from rasterizer with len={len(ret)}\")\n if verbose:\n print(\n f\"render time: {(time.time() - start_time)*1000:.3f}ms\",\n )\n ret = {\n \"rgb\": rendered_image,\n \"dep\": depth,\n \"alpha\": alpha,\n \"viewspace_points\": screenspace_points,\n \"visibility_filter\": radii > 0,\n \"radii\": radii,\n }\n if center_handling_flag:\n for k in [\"rgb\", \"dep\", \"alpha\"]:\n if ret[k] is None:\n continue\n if left_w > right_w:\n ret[k] = ret[k][:, :, :W]\n else:\n ret[k] = ret[k][:, :, -W:]\n if top_h > bottom_h:\n ret[k] = ret[k][:, :H, :]\n else:\n ret[k] = ret[k][:, -H:, :]\n return ret" }, { "identifier": "transform_mu_frame", "path": "lib_gart/model_utils.py", "snippet": "def transform_mu_frame(mu, frame, T):\n if len(mu) != len(T):\n assert len(mu) == 1 and len(frame) == 1\n mu = mu.expand(len(T), -1, -1)\n frame = frame.expand(len(T), -1, -1, -1)\n R, t = T[:, :3, :3], T[:, :3, 3]\n new_frame = torch.einsum(\"bij, bnjk->bnik\", R, frame)\n new_mu = torch.einsum(\"bij, bnj->bni\", R, mu) + t[:, None]\n return new_mu, new_frame" }, { "identifier": "viz_render", "path": "utils/viz.py", "snippet": "def viz_render(gt_rgb, gt_mask, pred_pkg, save_path=None):\n pred_rgb = pred_pkg[\"rgb\"].permute(1, 2, 0)\n pred_mask = pred_pkg[\"alpha\"].squeeze(0)\n pred_depth = pred_pkg[\"dep\"].squeeze(0)\n fig = plt.figure(figsize=(20, 5))\n plt.subplot(1, 5, 1)\n plt.imshow(torch.clamp(gt_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"GT\"), plt.axis(\"off\")\n plt.subplot(1, 5, 2)\n plt.imshow(torch.clamp(pred_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"Pred view\"), plt.axis(\"off\")\n plt.subplot(1, 5, 3)\n error = torch.clamp(abs(pred_rgb - gt_rgb), 0.0, 1.0).detach().cpu().numpy().max(axis=-1)\n cmap = plt.imshow(error)\n plt.title(\"Render Error (max in rgb)\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.subplot(1, 5, 4)\n error = torch.clamp(pred_mask - gt_mask, -1.0, 1.0).detach().cpu().numpy()\n cmap = plt.imshow(error)\n plt.title(\"(Pr - GT) Mask Error\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n \n plt.subplot(1, 5, 5)\n depth = pred_depth.detach().cpu().numpy()\n cmap = plt.imshow(depth)\n plt.title(\"Pred Depth\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.tight_layout()\n fig.canvas.draw()\n fig_np = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n 
fig_np = fig_np.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n if save_path is not None:\n plt.savefig(save_path)\n plt.close(fig)\n return fig_np" }, { "identifier": "sample_camera", "path": "lib_guidance/camera_sampling.py", "snippet": "def sample_camera(\n global_step=1,\n n_view=4,\n real_batch_size=1,\n random_azimuth_range=[-180.0, 180.0],\n random_elevation_range=[0.0, 30.0],\n eval_elevation_deg=15,\n camera_distance_range=[0.8, 1.0], # relative\n fovy_range=[15, 60],\n zoom_range=[1.0, 1.0],\n progressive_until=0,\n relative_radius=True,\n):\n # camera_perturb = 0.0\n # center_perturb = 0.0\n # up_perturb: 0.0\n\n # ! from uncond.py\n # ThreeStudio has progressive increase of camera poses, from eval to random\n r = min(1.0, global_step / (progressive_until + 1))\n elevation_range = [\n (1 - r) * eval_elevation_deg + r * random_elevation_range[0],\n (1 - r) * eval_elevation_deg + r * random_elevation_range[1],\n ]\n azimuth_range = [\n (1 - r) * 0.0 + r * random_azimuth_range[0],\n (1 - r) * 0.0 + r * random_azimuth_range[1],\n ]\n\n # sample elevation angles\n if random.random() < 0.5:\n # sample elevation angles uniformly with a probability 0.5 (biased towards poles)\n elevation_deg = (\n torch.rand(real_batch_size) * (elevation_range[1] - elevation_range[0])\n + elevation_range[0]\n ).repeat_interleave(n_view, dim=0)\n elevation = elevation_deg * math.pi / 180\n else:\n # otherwise sample uniformly on sphere\n elevation_range_percent = [\n (elevation_range[0] + 90.0) / 180.0,\n (elevation_range[1] + 90.0) / 180.0,\n ]\n # inverse transform sampling\n elevation = torch.asin(\n 2\n * (\n torch.rand(real_batch_size)\n * (elevation_range_percent[1] - elevation_range_percent[0])\n + elevation_range_percent[0]\n )\n - 1.0\n ).repeat_interleave(n_view, dim=0)\n elevation_deg = elevation / math.pi * 180.0\n\n # sample azimuth angles from a uniform distribution bounded by azimuth_range\n # ensures sampled azimuth angles in a batch cover the whole range\n azimuth_deg = (\n torch.rand(real_batch_size).reshape(-1, 1) + torch.arange(n_view).reshape(1, -1)\n ).reshape(-1) / n_view * (azimuth_range[1] - azimuth_range[0]) + azimuth_range[0]\n azimuth = azimuth_deg * math.pi / 180\n\n ######## Different from original ########\n # sample fovs from a uniform distribution bounded by fov_range\n fovy_deg = (\n torch.rand(real_batch_size) * (fovy_range[1] - fovy_range[0]) + fovy_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy_deg * math.pi / 180\n\n # sample distances from a uniform distribution bounded by distance_range\n camera_distances = (\n torch.rand(real_batch_size) * (camera_distance_range[1] - camera_distance_range[0])\n + camera_distance_range[0]\n ).repeat_interleave(n_view, dim=0)\n if relative_radius:\n scale = 1 / torch.tan(0.5 * fovy)\n camera_distances = scale * camera_distances\n\n # zoom in by decreasing fov after camera distance is fixed\n zoom = (\n torch.rand(real_batch_size) * (zoom_range[1] - zoom_range[0]) + zoom_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy * zoom\n fovy_deg = fovy_deg * zoom\n ###########################################\n\n # convert spherical coordinates to cartesian coordinates\n # right hand coordinate system, x back, y right, z up\n # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)\n camera_positions = torch.stack(\n [\n camera_distances * torch.cos(elevation) * torch.cos(azimuth),\n camera_distances * torch.cos(elevation) * torch.sin(azimuth),\n camera_distances * torch.sin(elevation),\n ],\n dim=-1,\n )\n\n 
azimuth, elevation\n # build opencv camera\n z = -torch.stack(\n [\n torch.cos(elevation) * torch.cos(azimuth),\n torch.cos(elevation) * torch.sin(azimuth),\n torch.sin(elevation),\n ],\n -1,\n ) # nview, 3\n # up is 0,0,1\n x = torch.cross(z, torch.tensor([0.0, 0.0, 1.0], device=z.device).repeat(n_view, 1), -1)\n y = torch.cross(z, x, -1)\n\n R_wc = torch.stack([x, y, z], dim=2) # nview, 3, 3, col is basis\n t_wc = camera_positions\n\n T_wc = torch.eye(4, device=R_wc.device).repeat(n_view, 1, 1)\n T_wc[:, :3, :3] = R_wc\n T_wc[:, :3, 3] = t_wc\n\n return T_wc, fovy_deg # B,4,4, B" }, { "identifier": "fov2K", "path": "lib_guidance/camera_sampling.py", "snippet": "def fov2K(fov=90, H=256, W=256):\n if isinstance(fov, torch.Tensor):\n f = H / (2 * torch.tan(fov / 2 * np.pi / 180))\n K = torch.eye(3).repeat(fov.shape[0], 1, 1).to(fov)\n K[:, 0, 0], K[:, 0, 2] = f, W / 2.0\n K[:, 1, 1], K[:, 1, 2] = f, H / 2.0\n return K.clone()\n else:\n f = H / (2 * np.tan(fov / 2 * np.pi / 180))\n K = np.eye(3)\n K[0, 0], K[0, 2] = f, W / 2.0\n K[1, 1], K[1, 2] = f, H / 2.0\n return K.copy()" }, { "identifier": "opencv2blender", "path": "lib_guidance/camera_sampling.py", "snippet": "def opencv2blender(T):\n ret = T.clone()\n # y,z are negative\n ret[:, :, 1] *= -1\n ret[:, :, 2] *= -1\n return ret" }, { "identifier": "viz_spinning", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_spinning(\n model,\n pose,\n trans,\n H,\n W,\n K,\n save_path,\n time_index=None,\n n_spinning=10,\n model_mask=None,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n device = pose.device\n mu, fr, s, o, sph, additional_ret = model(\n pose, trans, {\"t\": time_index}, active_sph_order=active_sph_order\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n\n viz_frames = []\n for vid in range(n_spinning):\n spin_R = (\n torch.from_numpy(euler2mat(0, 2 * np.pi * vid / n_spinning, 0, \"sxyz\"))\n .to(device)\n .float()\n )\n spin_t = mu.mean(1)[0]\n spin_t = (torch.eye(3).to(device) - spin_R) @ spin_t[:, None]\n spin_T = torch.eye(4).to(device)\n spin_T[:3, :3] = spin_R\n spin_T[:3, 3] = spin_t.squeeze(-1)\n viz_mu, viz_fr = transform_mu_frame(mu, fr, spin_T[None])\n\n render_pkg = render_cam_pcl(\n viz_mu[0],\n viz_fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=bg_color,\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(save_path, viz_frames)\n return" }, { "identifier": "viz_human_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_human_all(\n solver,\n data_provider: RealDataOptimizablePoseProviderPose = None,\n ckpt_dir=None,\n training_skip=1,\n n_spinning=40,\n novel_pose_dir=\"novel_poses\",\n novel_skip=2,\n model=None,\n model_mask=None,\n viz_name=\"\",\n export_mesh_flag=False, # remove this from release version\n):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_human_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n active_sph_order = int(model.max_sph_order)\n\n if data_provider is not None:\n # if ckpt_dir is None:\n # ckpt_dir = solver.log_dir\n # pose_path = osp.join(ckpt_dir, \"pose.pth\")\n pose_base_list = 
data_provider.pose_base_list\n pose_rest_list = data_provider.pose_rest_list\n global_trans_list = data_provider.global_trans_list\n pose_list = torch.cat([pose_base_list, pose_rest_list], 1)\n pose_list, global_trans_list = pose_list.to(\n solver.device\n ), global_trans_list.to(solver.device)\n rgb_list = data_provider.rgb_list\n mask_list = data_provider.mask_list\n K_list = data_provider.K_list\n H, W = rgb_list.shape[1:3]\n else:\n H, W = 512, 512\n K_list = [torch.from_numpy(fov2K(45, H, W)).float().to(solver.device)]\n global_trans_list = torch.zeros(1, 3).to(solver.device)\n global_trans_list[0, -1] = 3.0\n\n # viz training\n if data_provider is not None:\n print(\"Viz training...\")\n viz_frames = []\n for t in range(len(pose_list)):\n if t % training_skip != 0:\n continue\n pose = pose_list[t][None]\n K = K_list[t]\n trans = global_trans_list[t][None]\n time_index = torch.Tensor([t]).long().to(solver.device)\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n {\"t\": time_index}, # use time_index from training set\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n viz_frame = viz_render(rgb_list[t], mask_list[t], render_pkg)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/training.gif\", viz_frames)\n\n # viz static spinning\n print(\"Viz spinning...\")\n can_pose = model.template.canonical_pose.detach()\n viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, \"sxyz\"))\n viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float()\n can_pose[0] = viz_base_R_opencv.to(can_pose.device)\n can_pose = matrix_to_axis_angle(can_pose)[None]\n dapose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n dapose[:, 1, -1] = np.pi / 4\n dapose[:, 2, -1] = -np.pi / 4\n dapose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n tpose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n tpose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n to_viz = {\"cano-pose\": can_pose, \"t-pose\": tpose, \"da-pose\": dapose}\n if data_provider is not None:\n to_viz[\"first-frame\"] = pose_list[0][None]\n\n for name, pose in to_viz.items():\n print(f\"Viz novel {name}...\")\n # if export_mesh_flag:\n # from lib_marchingcubes.gaumesh_utils import MeshExtractor\n # # also extract a mesh\n # mesh = solver.extract_mesh(model, pose)\n # mesh.export(f\"{viz_dir}/mc_{name}.obj\", \"obj\")\n\n # # for making figures, the rotation is in another way\n # viz_spinning_self_rotate(\n # model,\n # solver.viz_base_R.detach(),\n # pose,\n # global_trans_list[0][None],\n # H,\n # W,\n # K_list[0],\n # f\"{viz_dir}/{name}_selfrotate.gif\",\n # time_index=None, # if set to None and use t, the add_bone will hand this\n # n_spinning=n_spinning,\n # active_sph_order=model.max_sph_order,\n # )\n viz_spinning(\n model,\n pose,\n global_trans_list[0][None],\n H,\n W,\n K_list[0],\n f\"{viz_dir}/{name}.gif\",\n time_index=None, # if set to None and use t, the add_bone will hand this\n n_spinning=n_spinning,\n active_sph_order=model.max_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n\n # viz novel pose dynamic spinning\n print(\"Viz novel 
seq...\")\n novel_pose_names = [\n f[:-4] for f in os.listdir(novel_pose_dir) if f.endswith(\".npy\")\n ]\n seq_viz_todo = {}\n for name in novel_pose_names:\n novel_pose_fn = osp.join(novel_pose_dir, f\"{name}.npy\")\n novel_poses = np.load(novel_pose_fn, allow_pickle=True)\n novel_poses = novel_poses[::novel_skip]\n N_frames = len(novel_poses)\n novel_poses = torch.from_numpy(novel_poses).float().to(solver.device)\n novel_poses = novel_poses.reshape(N_frames, 24, 3)\n\n seq_viz_todo[name] = (novel_poses, N_frames)\n if data_provider is not None:\n seq_viz_todo[\"training\"] = [pose_list, len(pose_list)]\n\n for name, (novel_poses, N_frames) in seq_viz_todo.items():\n base_R = solver.viz_base_R.detach().cpu().numpy()\n viz_frames = []\n K = K_list[0]\n for vid in range(N_frames):\n pose = novel_poses[vid][None]\n # pose = novel_poses[0][None] # debug\n rotation = euler2mat(2 * np.pi * vid / N_frames, 0.0, 0.0, \"syxz\")\n rotation = torch.from_numpy(rotation @ base_R).float().to(solver.device)\n pose[:, 0] = matrix_to_axis_angle(rotation[None])[0]\n trans = global_trans_list[0][None]\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n # not pass in {}, so t is auto none\n additional_dict={},\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n # bg_color=[1.0, 1.0, 1.0], # ! use white bg for viz\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/novel_pose_{name}.gif\", viz_frames)\n return" }, { "identifier": "viz_dog_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_dog_all(solver, data_provider, model=None, ckpt_dir=None, viz_name=\"\"):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_dog_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True) # use mean pose for viz \n limb = viz_pose[:, -7:] \n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n pose[:, :-3] = 0 # exclude ears and mouth poses\n\n viz_pose = torch.concat([pose.reshape(1, -1), limb], dim=1)\n viz_trans = torch.tensor([[0.0, -0.3, 25.0]], device=\"cuda:0\")\n\n viz_dog_spin(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin.gif\"),\n n_spinning=42,\n )\n\n viz_dog_spin2(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin2.gif\"),\n n_spinning=20,\n )\n\n ######################################################################\n # Dataset pose seq\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True)\n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n limb = viz_pose[:, -7:]\n\n # Animation\n aroot 
= osp.join(osp.dirname(__file__), \"novel_poses/husky\")\n window = list(range(350, 440)) # Run\n trans = torch.tensor([[0.3, -0.3, 25.0]], device=\"cuda:0\")\n files = [f\"{aroot}/{i:04d}.npz\" for i in window]\n pose_list = [dict(np.load(file))[\"pred_pose\"] for file in files]\n pose_list = np.concatenate(pose_list)\n animation = matrix_to_axis_angle(torch.from_numpy(pose_list)).to(solver.device)\n animation[:, [32, 33, 34]] = pose[:, [32, 33, 34]] \n\n viz_dog_animation(\n model.to(\"cuda\"),\n animation,\n limb,\n trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"animation.gif\"),\n fps=12,\n )\n return" }, { "identifier": "ssim", "path": "utils/ssim.py", "snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "test", "path": "test_utils/test_func.py", "snippet": "def test(\n solver,\n seq_name: str,\n tto_flag=True,\n tto_step=300,\n tto_decay=60,\n tto_decay_factor=0.5,\n pose_base_lr=3e-3,\n pose_rest_lr=3e-3,\n trans_lr=3e-3,\n dataset_mode=\"people_snapshot\",\n training_optimized_seq=None,\n):\n device = solver.device\n model = solver.load_saved_model()\n\n assert dataset_mode in [\n \"people_snapshot\",\n \"zju\",\n \"instant_avatar_wild\",\n \"dog_demo\",\n ], f\"Unknown dataset mode {dataset_mode}\"\n\n if dataset_mode == \"people_snapshot\":\n eval_mode = \"avatar\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n elif dataset_mode == \"zju\":\n eval_mode = \"nvr\"\n test_dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n bg = [0.0, 0.0, 0.0] # zju use black background\n elif dataset_mode == \"instant_avatar_wild\":\n eval_mode = \"avatar\"\n test_dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=1.0,\n # ! 
warning, here follow the `ubc_hard.yaml` in InstAVT setting, use slicing\n start_end_skip=[2, 1000000000, 4],\n )\n bg = [1.0, 1.0, 1.0]\n\n test_len = len(test_dataset)\n assert (training_optimized_seq.total_t == test_len) or (\n training_optimized_seq.total_t == 1 + test_len\n ), \"Now UBC can only support the same length of training and testing or + 1\"\n test_dataset.smpl_params[\"body_pose\"] = (\n training_optimized_seq.pose_rest_list.reshape(-1, 69)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"global_orient\"] = (\n training_optimized_seq.pose_base_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"transl\"] = (\n training_optimized_seq.global_trans_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n elif dataset_mode == \"dog_demo\":\n eval_mode = \"avatar_brightness\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = DogDemoDataset(\n data_root=\"./data/dog_data_official/\", video_name=seq_name, test=True\n )\n else:\n raise NotImplementedError()\n\n evaluator = get_evaluator(eval_mode, device)\n\n _save_eval_maps(\n solver.log_dir,\n \"test\",\n model,\n solver,\n test_dataset,\n dataset_mode=dataset_mode,\n device=device,\n bg=bg,\n tto_flag=tto_flag,\n tto_step=tto_step,\n tto_decay=tto_decay,\n tto_decay_factor=tto_decay_factor,\n tto_evaluator=evaluator,\n pose_base_lr=pose_base_lr,\n pose_rest_lr=pose_rest_lr,\n trans_lr=trans_lr,\n )\n\n if tto_flag:\n _evaluate_dir(evaluator, solver.log_dir, \"test_tto\")\n else:\n _evaluate_dir(evaluator, solver.log_dir, \"test\")\n\n return" } ]
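The `sample_camera` and `fov2K` snippets in the context above rest on two small pieces of math: inverse-transform sampling so that elevations are uniform on the sphere rather than uniform in angle, and the pinhole relation f = H / (2·tan(fov/2)) with the principal point at the image center. A minimal standalone sketch of both follows; the function names and default values here are illustrative and not taken from the repository.

```python
import math
import numpy as np
import torch

# Inverse-transform sampling of elevation so points are uniform on the sphere
# (mirrors the `torch.asin(2 * u - 1)` branch of `sample_camera` above).
def sample_uniform_sphere_elevation(n, elev_range_deg=(0.0, 30.0)):
    lo, hi = [(e + 90.0) / 180.0 for e in elev_range_deg]  # map degrees to [0, 1]
    u = torch.rand(n) * (hi - lo) + lo
    elevation = torch.asin(2.0 * u - 1.0)                  # radians
    return elevation * 180.0 / math.pi                     # back to degrees

# Pinhole intrinsics from a vertical field of view, as in `fov2K` above:
# f = H / (2 * tan(fov / 2)), principal point at (W/2, H/2).
def intrinsics_from_fov(fov_deg=45.0, H=256, W=256):
    f = H / (2.0 * np.tan(np.deg2rad(fov_deg) / 2.0))
    return np.array([[f, 0.0, W / 2.0],
                     [0.0, f, H / 2.0],
                     [0.0, 0.0, 1.0]])

print(sample_uniform_sphere_elevation(4))
print(intrinsics_from_fov(45.0, 256, 256))
```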
from matplotlib import pyplot as plt from pytorch3d.transforms import matrix_to_axis_angle from tqdm import tqdm from transforms3d.euler import euler2mat from omegaconf import OmegaConf from lib_data.get_data import prepare_real_seq from lib_data.data_provider import DatabasePoseProvider from lib_gart.templates import get_template from lib_gart.model import GaussianTemplateModel, AdditionalBones from lib_gart.optim_utils import * from lib_render.gauspl_renderer import render_cam_pcl from lib_gart.model_utils import transform_mu_frame from utils.misc import * from utils.viz import viz_render from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle from pytorch3d.ops import knn_points from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender from viz_utils import viz_spinning, viz_human_all, viz_dog_all from utils.ssim import ssim from datetime import datetime from test_utils import test from lib_guidance.mvdream.mvdream_guidance import MVDream from utils.lpips import LPIPS import imageio import torch import numpy as np import os, os.path as osp, shutil, sys import time import logging import argparse
21,036
setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), ) add_bones = AdditionalBones( num_bones=getattr(self, "W_REST_DIM", 0), mode=getattr(self, "W_REST_MODE", "pose-mlp"), total_t=add_bones_total_t, # pose mlp mlp_hidden_dims=getattr( self, "ADD_BONES_MLP_HIDDEN_DIMS", [256, 256, 256, 256] ), pose_dim=23 * 3 if self.mode == "human" else 34 * 3 + 7, )
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! 
this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), ) add_bones = AdditionalBones( num_bones=getattr(self, "W_REST_DIM", 0), mode=getattr(self, "W_REST_MODE", "pose-mlp"), total_t=add_bones_total_t, # pose mlp mlp_hidden_dims=getattr( self, "ADD_BONES_MLP_HIDDEN_DIMS", [256, 256, 256, 256] ), pose_dim=23 * 3 if self.mode == "human" else 34 * 3 + 7, )
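In the `cropped_code` / `all_code` fields above, `TGFitter.__init__` accepts the opacity-reset and re-Gaussian schedules either as an explicit list of iterations or as a single integer period, which it expands into explicit milestones below `TOTAL_steps`. A minimal sketch of that expansion in isolation; the helper name and the numbers are made up for illustration.

```python
# Expand a periodic interval into explicit step indices, or pass a list through,
# following the RESET_OPACITY_STEPS / REGAUSSIAN_STEPS handling shown above.
def expand_period_to_steps(period_or_list, total_steps):
    if isinstance(period_or_list, int):
        return [i for i in range(1, total_steps) if i % period_or_list == 0]
    return list(period_or_list)

print(expand_period_to_steps(1500, 5000))        # [1500, 3000, 4500]
print(expand_period_to_steps([100, 900], 5000))  # [100, 900]
```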
model = GaussianTemplateModel(
3
2023-11-27 17:30:04+00:00
24k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
import os
import sys
import numpy as np
import torch
import json
import imageio
import cv2
import random
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
    read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from smpl.smpl_numpy import SMPL
from smplx.body_models import SMPLX
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
16,223
    elements = np.empty(xyz.shape[0], dtype=dtype)
    attributes = np.concatenate((xyz, normals, rgb), axis=1)
    elements[:] = list(map(tuple, attributes))

    # Create the PlyData object and write to file
    vertex_element = PlyElement.describe(elements, 'vertex')
    ply_data = PlyData([vertex_element])
    ply_data.write(path)

def readColmapSceneInfo(path, images, eval, llffhold=8):
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images == None else images
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
    cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)

    if eval:
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except:
        pcd = None

    scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path)
    return scene_info

def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
    cam_infos = []

    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        fovx = contents["camera_angle_x"]

        frames = contents["frames"]
        for idx, frame in enumerate(frames[:20]):
            cam_name = os.path.join(path, frame["file_path"] + extension)

            # NeRF 'transform_matrix' is a camera-to-world transform
            c2w = np.array(frame["transform_matrix"])
            # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
            c2w[:3, 1:3] *= -1

            # get the world-to-camera transform and set R, T
            w2c = np.linalg.inv(c2w)
            R = np.transpose(w2c[:3,:3])  # R is stored transposed due to 'glm' in CUDA code
            T = w2c[:3, 3]

            image_path = os.path.join(path, cam_name)
            image_name = Path(cam_name).stem
            image = Image.open(image_path)

            im_data = np.array(image.convert("RGBA"))

            bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])

            norm_data = im_data / 255.0
            arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
            image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")

            fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
            FovY = fovy
            FovX = fovx

            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1]))

    return cam_infos

def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)

    if not eval:
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "points3d.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    nerf_normalization: dict
    ply_path: str

def getNerfppNorm(cam_info):
    def get_center_and_diag(cam_centers):
        cam_centers = np.hstack(cam_centers)
        avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
        center = avg_cam_center
        dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
        diagonal = np.max(dist)
        return center.flatten(), diagonal

    cam_centers = []
    for cam in cam_info:
        W2C = getWorld2View2(cam.R, cam.T)
        C2W = np.linalg.inv(W2C)
        cam_centers.append(C2W[:3, 3:4])

    center, diagonal = get_center_and_diag(cam_centers)
    radius = diagonal * 1.1
    translate = -center

    return {"translate": translate, "radius": radius}

def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width

        uid = intr.id
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)

        if intr.model=="SIMPLE_PINHOLE":
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"

        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)

        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos

def fetchPly(path):
    plydata = PlyData.read(path)
    vertices = plydata['vertex']
    positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
    colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
    normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
    return BasicPointCloud(points=positions, colors=colors, normals=normals)

def storePly(path, xyz, rgb):
    # Define the dtype for the structured array
    dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]

    normals = np.zeros_like(xyz)

    elements = np.empty(xyz.shape[0], dtype=dtype)
    attributes = np.concatenate((xyz, normals, rgb), axis=1)
    elements[:] = list(map(tuple, attributes))

    # Create the PlyData object and write to file
    vertex_element = PlyElement.describe(elements, 'vertex')
    ply_data = PlyData([vertex_element])
    ply_data.write(path)

def readColmapSceneInfo(path, images, eval, llffhold=8):
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images == None else images
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
    cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)

    if eval:
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except:
        pcd = None

    scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path)
    return scene_info

def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
    cam_infos = []

    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        fovx = contents["camera_angle_x"]

        frames = contents["frames"]
        for idx, frame in enumerate(frames[:20]):
            cam_name = os.path.join(path, frame["file_path"] + extension)

            # NeRF 'transform_matrix' is a camera-to-world transform
            c2w = np.array(frame["transform_matrix"])
            # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
            c2w[:3, 1:3] *= -1

            # get the world-to-camera transform and set R, T
            w2c = np.linalg.inv(c2w)
            R = np.transpose(w2c[:3,:3])  # R is stored transposed due to 'glm' in CUDA code
            T = w2c[:3, 3]

            image_path = os.path.join(path, cam_name)
            image_name = Path(cam_name).stem
            image = Image.open(image_path)

            im_data = np.array(image.convert("RGBA"))

            bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])

            norm_data = im_data / 255.0
            arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
            image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")

            fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
            FovY = fovy
            FovX = fovx

            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1]))

    return cam_infos

def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)

    if not eval:
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "points3d.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
10
2023-11-29 07:10:39+00:00
24k
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n In addition the pipeline inherits the following loading methods:\n - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):\n Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets\n as a list, the outputs from each ControlNet are added together to create one combined additional\n conditioning.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPImageProcessor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n _optional_components = [\"safety_checker\", \"feature_extractor\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n\n if safety_checker is None and requires_safety_checker:\n logger.warning(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. 
For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n if safety_checker is not None and feature_extractor is None:\n raise ValueError(\n \"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety\"\n \" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.\"\n )\n\n if isinstance(controlnet, (list, tuple)):\n controlnet = MultiControlNetModel(controlnet)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n controlnet=controlnet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n self.register_to_config(requires_safety_checker=requires_safety_checker)\n\n def _init_tiled_vae(self,\n encoder_tile_size = 256,\n decoder_tile_size = 256,\n fast_decoder = False,\n fast_encoder = False,\n color_fix = False,\n vae_to_gpu = True):\n # save original forward (only once)\n if not hasattr(self.vae.encoder, 'original_forward'):\n setattr(self.vae.encoder, 'original_forward', self.vae.encoder.forward)\n if not hasattr(self.vae.decoder, 'original_forward'):\n setattr(self.vae.decoder, 'original_forward', self.vae.decoder.forward)\n\n encoder = self.vae.encoder\n decoder = self.vae.decoder\n\n self.vae.encoder.forward = VAEHook(\n encoder, encoder_tile_size, is_decoder=False, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n self.vae.decoder.forward = VAEHook(\n decoder, decoder_tile_size, is_decoder=True, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several\n steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling\n def enable_vae_tiling(self):\n r\"\"\"\n Enable tiled VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in\n several steps. This is useful to save a large amount of memory and to allow the processing of larger images.\n \"\"\"\n self.vae.enable_tiling()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling\n def disable_vae_tiling(self):\n r\"\"\"\n Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_tiling()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n Note that offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:\n cpu_offload(cpu_offloaded_model, device)\n\n if self.safety_checker is not None:\n cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)\n\n def enable_model_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n # the safety checker can offload the vae again\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # control net hook has be manually offloaded as it alternates with unet\n cpu_offload_with_hook(self.controlnet, device)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook\n\n @property\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. 
After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n ram_encoder_hidden_states: Optional[torch.FloatTensor] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n \"\"\"\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n prompt = self.maybe_convert_prompt(prompt, self.tokenizer)\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance and negative_prompt_embeds is None:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif prompt is not None and type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n ram_encoder_hidden_states = torch.cat([ram_encoder_hidden_states, ram_encoder_hidden_states])\n\n return prompt_embeds, ram_encoder_hidden_states\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is None:\n has_nsfw_concept = None\n else:\n if torch.is_tensor(image):\n feature_extractor_input = self.image_processor.postprocess(image, output_type=\"pil\")\n else:\n feature_extractor_input = self.image_processor.numpy_to_pil(image)\n safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors=\"pt\").to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n return image, has_nsfw_concept\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents\n def decode_latents(self, latents):\n warnings.warn(\n \"The decode_latents method is deprecated and will be removed in a future version. 
Please\"\n \" use VaeImageProcessor instead\",\n FutureWarning,\n )\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents, return_dict=False)[0]\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n #extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(\n self,\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n controlnet_conditioning_scale=1.0,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n # `prompt` needs more sophisticated handling when there are multiple\n # conditionings.\n if isinstance(self.controlnet, MultiControlNetModel):\n if isinstance(prompt, list):\n logger.warning(\n f\"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}\"\n \" prompts. 
The conditionings will be fixed across the prompts.\"\n )\n\n # Check `image`\n is_compiled = hasattr(F, \"scaled_dot_product_attention\") and isinstance(\n self.controlnet, torch._dynamo.eval_frame.OptimizedModule\n )\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n self.check_image(image, prompt, prompt_embeds)\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if not isinstance(image, list):\n raise TypeError(\"For multiple controlnets: `image` must be type `list`\")\n\n # When `image` is a nested list:\n # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])\n elif any(isinstance(i, list) for i in image):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif len(image) != len(self.controlnet.nets):\n raise ValueError(\n \"For multiple controlnets: `image` must have the same length as the number of controlnets.\"\n )\n\n for image_ in image:\n self.check_image(image_, prompt, prompt_embeds)\n else:\n assert False\n\n # Check `controlnet_conditioning_scale`\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n if not isinstance(controlnet_conditioning_scale, float):\n raise TypeError(\"For single controlnet: `controlnet_conditioning_scale` must be type `float`.\")\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if isinstance(controlnet_conditioning_scale, list):\n if any(isinstance(i, list) for i in controlnet_conditioning_scale):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(\n self.controlnet.nets\n ):\n raise ValueError(\n \"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have\"\n \" the same length as the number of controlnets\"\n )\n else:\n assert False\n\n def check_image(self, image, prompt, prompt_embeds):\n image_is_pil = isinstance(image, PIL.Image.Image)\n image_is_tensor = isinstance(image, torch.Tensor)\n image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n raise TypeError(\n \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n )\n\n if image_is_pil:\n image_batch_size = 1\n elif image_is_tensor:\n image_batch_size = image.shape[0]\n elif image_is_pil_list:\n image_batch_size = len(image)\n elif image_is_tensor_list:\n image_batch_size = len(image)\n\n if prompt is not None and isinstance(prompt, str):\n prompt_batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n prompt_batch_size = len(prompt)\n elif prompt_embeds is not None:\n prompt_batch_size = prompt_embeds.shape[0]\n\n if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n raise ValueError(\n f\"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n )\n\n def prepare_image(\n self,\n image,\n width,\n height,\n batch_size,\n num_images_per_prompt,\n device,\n dtype,\n do_classifier_free_guidance=False,\n guess_mode=False,\n ):\n if not isinstance(image, torch.Tensor):\n if isinstance(image, PIL.Image.Image):\n image = [image]\n\n if isinstance(image[0], PIL.Image.Image):\n images = []\n\n for image_ in image:\n image_ = image_.convert(\"RGB\")\n #image_ = image_.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"])\n image_ = np.array(image_)\n image_ = image_[None, :]\n images.append(image_)\n\n image = images\n\n image = np.concatenate(image, axis=0)\n image = np.array(image).astype(np.float32) / 255.0\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image)#.flip(1)\n elif isinstance(image[0], torch.Tensor):\n image = torch.cat(image, dim=0)\n\n image_batch_size = image.shape[0]\n\n if image_batch_size == 1:\n repeat_by = batch_size\n else:\n # image batch size is the same as prompt batch size\n repeat_by = num_images_per_prompt\n\n image = image.repeat_interleave(repeat_by, dim=0)\n\n image = image.to(device=device, dtype=dtype)\n\n if do_classifier_free_guidance and not guess_mode:\n image = torch.cat([image] * 2)\n\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n #latents = randn_tensor(shape, generator=None, device=device, dtype=dtype)\n #offset_noise = torch.randn(batch_size, num_channels_latents, 1, 1, device=device)\n #latents = latents + 0.1 * offset_noise\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def _default_height_width(self, height, width, image):\n # NOTE: It is possible that a list of images have different\n # dimensions for each image, so just checking the first image\n # is not _exactly_ correct, but it is simple.\n while isinstance(image, list):\n image = image[0]\n\n if height is None:\n if isinstance(image, PIL.Image.Image):\n height = image.height\n elif isinstance(image, torch.Tensor):\n height = image.shape[2]\n\n height = (height // 8) * 8 # round down to nearest multiple of 8\n\n if width is None:\n if isinstance(image, PIL.Image.Image):\n width = image.width\n elif isinstance(image, torch.Tensor):\n width = image.shape[3]\n\n width = (width // 8) * 8 # round down to nearest multiple of 8\n\n return height, width\n\n # override DiffusionPipeline\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n safe_serialization: bool = False,\n variant: Optional[str] = None,\n ):\n if isinstance(self.controlnet, ControlNetModel):\n super().save_pretrained(save_directory, safe_serialization, variant)\n else:\n raise NotImplementedError(\"Currently, the `save_pretrained()` is not implemented for Multi-ControlNet.\")\n \n def _gaussian_weights(self, tile_width, tile_height, nbatches):\n \"\"\"Generates a gaussian mask of weights for tile contributions\"\"\"\n from numpy import pi, exp, sqrt\n import numpy as np\n\n latent_width = tile_width\n latent_height = tile_height\n\n var = 0.01\n midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1\n x_probs = [exp(-(x-midpoint)*(x-midpoint)/(latent_width*latent_width)/(2*var)) / sqrt(2*pi*var) for x in range(latent_width)]\n midpoint = latent_height / 2\n y_probs = [exp(-(y-midpoint)*(y-midpoint)/(latent_height*latent_height)/(2*var)) / sqrt(2*pi*var) for y in range(latent_height)]\n\n weights = np.outer(y_probs, x_probs)\n return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))\n\n @perfcount\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n conditioning_scale: Union[float, 
List[float]] = 1.0,\n guess_mode: bool = False,\n image_sr = None,\n start_steps = 999,\n start_point = 'noise',\n ram_encoder_hidden_states=None,\n latent_tiled_size=320,\n latent_tiled_overlap=4,\n args=None\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,\n `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):\n The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If\n the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can\n also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If\n height and/or width are passed, `image` is resized according to them. If multiple ControlNets are\n specified in init, images must be passed as a list such that each element of the list can be correctly\n batched for input to a single controlnet.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):\n The outputs of the controlnet are multiplied by `conditioning_scale` before they are added\n to the residual in the original unet. If multiple ControlNets are specified in init, you can set the\n corresponding scale as a list.\n guess_mode (`bool`, *optional*, defaults to `False`):\n In this mode, the ControlNet encoder will try best to recognize the content of the input image even if\n you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height, width = self._default_height_width(height, width, image)\n \n # 1. Check inputs. Raise error if not correct\n \"\"\"\n self.check_inputs(\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n conditioning_scale,\n )\n \"\"\"\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n \"\"\"\n if isinstance(controlnet, MultiControlNetModel) and isinstance(conditioning_scale, float):\n conditioning_scale = [conditioning_scale] * len(controlnet.nets)\n \n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n \n guess_mode = guess_mode or global_pool_conditions\n \"\"\"\n\n # 3. Encode input prompt\n prompt_embeds, ram_encoder_hidden_states = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n ram_encoder_hidden_states=ram_encoder_hidden_states\n )\n\n # 4. Prepare image\n image = self.prepare_image(\n image=image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 6. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare the start point\n if start_point == 'noise':\n latents = latents\n elif start_point == 'lr': # LRE Strategy\n latents_condition_image = self.vae.encode(image*2-1).latent_dist.sample()\n latents_condition_image = latents_condition_image * self.vae.config.scaling_factor\n start_steps_tensor = torch.randint(start_steps, start_steps+1, (latents.shape[0],), device=latents.device)\n start_steps_tensor = start_steps_tensor.long()\n latents = self.scheduler.add_noise(latents_condition_image[0:1, ...], latents, start_steps_tensor)\n \n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n \n _, _, h, w = latents.size()\n tile_size, tile_overlap = (latent_tiled_size, latent_tiled_overlap) if args is not None else (256, 8)\n if h*w<=tile_size*tile_size:\n print(f\"[Tiled Latent]: the input size is tiny and unnecessary to tile.\")\n else:\n print(f\"[Tiled Latent]: the input size is {image.shape[-2]}x{image.shape[-1]}, need to tiled\")\n\n for i, t in enumerate(timesteps):\n # pass, if the timestep is larger than start_steps\n if t > start_steps:\n print(f'pass {t} steps.')\n continue\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n controlnet_latent_model_input = latents\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n \n else:\n controlnet_latent_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if h*w<=tile_size*tile_size: # tiled latent input\n down_block_res_samples, mid_block_res_sample = [None]*10, None\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=image,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n else:\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n tile_size = min(tile_size, min(h, w))\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n\n grid_rows = 0\n cur_x = 0\n while cur_x < latent_model_input.size(-1):\n cur_x = max(grid_rows * tile_size-tile_overlap * grid_rows, 0)+tile_size\n grid_rows += 1\n\n grid_cols = 0\n cur_y = 0\n while cur_y < latent_model_input.size(-2):\n cur_y = max(grid_cols * tile_size-tile_overlap * grid_cols, 0)+tile_size\n grid_cols += 1\n\n input_list = []\n cond_list = []\n img_list = []\n noise_preds = []\n for row in range(grid_rows):\n noise_preds_row = []\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + 
tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n\n # input tile dimensions\n input_tile = latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n input_list.append(input_tile)\n cond_tile = controlnet_latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n cond_list.append(cond_tile)\n img_tile = image[:, :, input_start_y*8:input_end_y*8, input_start_x*8:input_end_x*8]\n img_list.append(img_tile)\n\n if len(input_list) == batch_size or col == grid_cols-1:\n input_list_t = torch.cat(input_list, dim=0)\n cond_list_t = torch.cat(cond_list, dim=0)\n img_list_t = torch.cat(img_list, dim=0)\n #print(input_list_t.shape, cond_list_t.shape, img_list_t.shape, fg_mask_list_t.shape)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n cond_list_t,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=img_list_t,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n model_out = self.unet(\n input_list_t,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n\n #for sample_i in range(model_out.size(0)):\n # noise_preds_row.append(model_out[sample_i].unsqueeze(0))\n input_list = []\n cond_list = []\n img_list = []\n\n noise_preds.append(model_out)\n\n # Stitch noise predictions for all tiles\n noise_pred = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n contributors = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n # Add each tile contribution to overall latents\n for row in range(grid_rows):\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n \n noise_pred[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += noise_preds[row*grid_cols + col] * tile_weights\n contributors[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += tile_weights\n # Average overlapping areas with more than 1 contributor\n noise_pred /= contributors\n \n \n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n # call the callback, if 
provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n has_nsfw_concept = None\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]#.flip(1)\n #image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "load_dreambooth_lora", "path": "utils/misc.py", "snippet": "def load_dreambooth_lora(unet, vae=None, model_path=None, alpha=1.0, model_base=\"\"):\n if model_path is None: return unet\n \n if model_path.endswith(\".ckpt\"):\n base_state_dict = torch.load(model_path)['state_dict']\n elif model_path.endswith(\".safetensors\"):\n state_dict = {}\n with safe_open(model_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n state_dict[key] = f.get_tensor(key)\n \n is_lora = all(\"lora\" in k for k in state_dict.keys())\n if not is_lora:\n base_state_dict = state_dict\n else:\n base_state_dict = {}\n with safe_open(model_base, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n base_state_dict[key] = f.get_tensor(key)\n \n converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_state_dict, unet.config)\n unet_state_dict = unet.state_dict()\n for key in converted_unet_checkpoint:\n converted_unet_checkpoint[key] = alpha * converted_unet_checkpoint[key] + (1.0-alpha) * unet_state_dict[key]\n unet.load_state_dict(converted_unet_checkpoint, strict=False)\n\n if vae is not None:\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_state_dict, vae.config)\n vae.load_state_dict(converted_vae_checkpoint)\n \n return unet, vae" }, { "identifier": "wavelet_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def wavelet_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply wavelet reconstruction\n result_tensor = wavelet_reconstruction(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "adain_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def adain_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply adaptive instance normalization\n 
result_tensor = adaptive_instance_normalization(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "ram", "path": "ram/models/ram_lora.py", "snippet": "def ram(pretrained='', pretrained_condition='', **kwargs):\n model = RAMLora(**kwargs)\n\n if pretrained:\n if kwargs['vit'] == 'swin_b':\n model, msg = load_checkpoint_swinbase(model, pretrained, kwargs)\n elif kwargs['vit'] == 'swin_l':\n model, msg = load_checkpoint_swinlarge(model, pretrained, kwargs)\n else:\n model, msg = load_checkpoint(model, pretrained)\n print('vit:', kwargs['vit'])\n \n if pretrained_condition:\n model.load_state_dict(torch.load(pretrained_condition), strict=False)\n print(f'load lora weights from {pretrained_condition}')\n\n return model" }, { "identifier": "inference_ram", "path": "ram/inference.py", "snippet": "def inference_ram(image, model):\n\n with torch.no_grad():\n tags, tags_chinese = model.generate_tag(image)\n\n return tags[0],tags_chinese[0]" }, { "identifier": "get_transform", "path": "ram/transform.py", "snippet": "def get_transform(image_size=384):\n return Compose([\n convert_to_rgb,\n Resize((image_size, image_size)),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])" } ]
import os import sys import cv2 import glob import argparse import numpy as np import torch import torch.utils.checkpoint import torch.nn as nn import torch.nn.functional as F from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline from utils.misc import load_dreambooth_lora from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix from ram.models.ram_lora import ram from ram import inference_ram as inference from ram import get_transform from typing import Mapping, Any from torchvision import transforms from torchvision import transforms from models.controlnet import ControlNetModel from models.unet_2d_condition import UNet2DConditionModel
15,067
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'):
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'):
model = ram(pretrained='preset/models/ram_swin_large_14m.pth',
4
2023-11-27 08:50:33+00:00
24k
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n self.validate = validate\n self.gif = gif\n self.aspect = FLAGS.train_res[1] / FLAGS.train_res[0]\n self.fovy_range_min = np.deg2rad(FLAGS.fovy_range[0])\n self.fovy_range_max = np.deg2rad(FLAGS.fovy_range[1])\n self.elevation_range_min= np.deg2rad(FLAGS.elevation_range[0])\n self.elevation_range_max= np.deg2rad(FLAGS.elevation_range[1])\n self.angle_front = np.deg2rad(FLAGS.front_threshold)\n \n\n def _gif_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.display_res[1] / self.FLAGS.display_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 100) * np.pi * 2\n rotate_x = np.deg2rad(20)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(-rotate_x) @ util.rotate_y(ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n \n \n\n def _validate_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 4) * np.pi * 2\n rotate_x = np.random.uniform(-np.pi/4,np.pi/18)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(rotate_x) @ util.rotate_y( ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n\n def _train_scene(self, itr):\n fovy = np.random.uniform(self.fovy_range_min, self.fovy_range_max)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n if self.FLAGS.gpu_number == 8: # All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0,4]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1,5]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2,6]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3,7]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n elif self.FLAGS.gpu_number == 4: #All the results in the paper were generated using 8 3090 GPUs. 
We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n else:\n rotate_y = np.random.uniform(np.deg2rad(-180), np.deg2rad(180)) #All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n \n rotate_x = -np.random.uniform(self.elevation_range_min, self.elevation_range_max)\n # angle_front = np.deg2rad(45)\n prompt_index = get_view_direction(thetas= rotate_x, phis = rotate_y, front= self.angle_front)\n cam_radius = 3\n x = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n y = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n mv = util.translate(x, y, -cam_radius) @ (util.rotate_x(rotate_x) @ util.rotate_y(rotate_y))\n if ((itr+1)/self.FLAGS.batch) <=self.FLAGS.coarse_iter:\n rotate_y1 = np.random.uniform(0,np.pi*2) \n rotate_x1 = np.random.uniform(-np.pi,np.pi)\n normal_rotate = util.rotate_y_1(rotate_y1 )@ util.rotate_x_1(rotate_x1) \n else:\n normal_rotate = util.rotate_y_1(0)@util.rotate_x_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(rotate_y), torch.tensor([fovy])\n\n def __len__(self):\n if self.gif == True:\n return 100\n else:\n return 4 if self.validate else (self.FLAGS.iter + 1) * self.FLAGS.batch\n\n def __getitem__(self, itr):\n if self.gif:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._gif_scene(itr)\n elif self.validate:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._validate_scene(itr)\n else:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._train_scene(itr)\n\n return {\n 'mv' : mv,\n 'mvp' : mvp,\n 'campos' : campos,\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate': normal_rotate,\n 'prompt_index' : prompt_index,\n 'elev': elev,\n 'azim': azim,\n 'fov': fov\n }\n def collate(self, batch):\n iter_res, iter_spp = batch[0]['resolution'], batch[0]['spp']\n return {\n 'mv' : torch.cat(list([item['mv'] for item in batch]), dim=0),\n 'mvp' : torch.cat(list([item['mvp'] for item in batch]), dim=0),\n 'campos' : torch.cat(list([item['campos'] for item in batch]), dim=0),\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate' : torch.cat(list([item['normal_rotate'] for item in batch]), dim=0),\n # 'prompt_index' : torch.cat(list([item['prompt_index'] for item in batch]), dim=0),\n 'prompt_index' : np.array([item['prompt_index'] for item in batch], dtype=np.int32),\n 'elev' : np.array([item['elev'] for item in batch], dtype=np.float16),\n 'azim' : np.array([item['azim'] for item in batch], dtype=np.float16),\n 'fov' : torch.cat(list([item['fov'] for item in batch]), dim=0),\n }" }, { "identifier": "get_camera_params", "path": "dataset/dataset_mesh.py", "snippet": "def get_camera_params(resolution= 512, fov=45, 
elev_angle=-20, azim_angle=0):\n fovy = np.deg2rad(fov) \n elev = np.radians( elev_angle )\n azim = np.radians( azim_angle ) \n proj_mtx = util.perspective(fovy, resolution /resolution, 1, 50)\n mv = util.translate(0, 0, -3) @ (util.rotate_x(elev) @ util.rotate_y(azim))\n normal_rotate = util.rotate_y_1(-azim ) @ util.rotate_x_1(-elev) \n # nomral_rotate = util.rotate_y_1(0) @ util.rotate_x_1(0) \n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n bkgs = torch.ones(1, resolution, resolution, 3, dtype=torch.float32, device='cuda')\n return {\n 'mvp' : mvp[None, ...].cuda(),\n 'mv' : mv[None, ...].cuda(),\n 'campos' : campos[None, ...].cuda(),\n 'resolution' : [resolution, resolution], \n 'spp' : 1,\n 'background' : bkgs,\n 'normal_rotate' : normal_rotate[None,...].cuda(),\n 'elev_angle' : torch.tensor(elev_angle).cuda(),\n 'azim_angle' : torch.tensor(azim_angle).cuda(),\n 'fov' : torch.tensor(fovy).cuda(),\n }" }, { "identifier": "DMTetGeometry", "path": "geometry/dmtet_x_dreamer.py", "snippet": "class DMTetGeometry(torch.nn.Module):\n def __init__(self, grid_res, scale, FLAGS):\n super(DMTetGeometry, self).__init__()\n\n self.FLAGS = FLAGS\n self.grid_res = grid_res\n self.marching_tets = DMTet()\n \n tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))\n self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale\n print(\"tet grid min/max\", torch.min(self.verts).item(), torch.max(self.verts).item())\n self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.generate_edges()\n self.pos_encoder = CameraEncoder().to(self.verts.device)\n\n def generate_edges(self):\n with torch.no_grad():\n edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = \"cuda\")\n all_edges = self.indices[:,edges].reshape(-1,2) \n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n @torch.no_grad()\n def getAABB(self):\n return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values\n\n def getMesh(self, material):\n pred= self.decoder(self.verts)\n \n self.sdf , self.deform = pred[:, 0], pred[:, 1:] \n v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)\n verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)\n \n imesh = mesh.Mesh(verts, faces, material=material)\n imesh = mesh.auto_normals(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material) \n return render.render_mesh(glctx, \n opt_mesh, \n target['mvp'], \n target['campos'], \n lgt, \n target['resolution'], \n spp=target['spp'], \n msaa= True,\n background= target['background'],\n bsdf= bsdf,\n if_normal= if_normal,\n normal_rotate= target['normal_rotate'],\n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n \n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal= if_normal, mode = 
mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]]) # [B*2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z]) # [B * 2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n \n if iteration <=self.FLAGS.coarse_iter:\n t = torch.randint( guidance.min_step_early, guidance.max_step_early + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n pred_rgb_512 = buffers['shaded'][..., 0:4].permute(0, 3, 1, 2).contiguous() # [B, 4, 64, 64]\n latents = F.interpolate(pred_rgb_512, (64, 64), mode='bilinear', align_corners=False)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n \n else:\n t = torch.randint(guidance.min_step_late, guidance.max_step_late + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda')\n srgb = buffers['shaded'][...,0:3] #* buffers['shaded'][..., 3:4] # normal * mask\n # \n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [B, 3, 512, 512]\n latents = guidance.encode_imgs(pred_rgb_512)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n\n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings, index=indexs, came_posfeat=came_posfeat)\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred =noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond) # [B, 4, 64, 64]\n if iteration <= self.FLAGS.coarse_iter:\n w = (1 - guidance.alphas[t]) # [B]\n else:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w * (noise_pred - noise ) #*w1\n grad = torch.nan_to_num(grad)\n \n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n\n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), 
(32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask_sizes[i], mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "DLMesh", "path": "geometry/dlmesh_x_dreamer.py", "snippet": "class DLMesh(torch.nn.Module):\n def __init__(self, initial_guess, FLAGS):\n super(DLMesh, self).__init__()\n self.FLAGS = FLAGS\n self.initial_guess = initial_guess\n self.mesh = initial_guess.clone()\n self.pos_encoder = CameraEncoder().cuda()\n print(\"Base mesh has %d triangles and %d vertices.\" % (self.mesh.t_pos_idx.shape[0], self.mesh.v_pos.shape[0]))\n \n @torch.no_grad()\n def getAABB(self):\n return mesh.aabb(self.mesh)\n\n def getMesh(self, material):\n self.mesh.material = material\n\n imesh = mesh.Mesh(base=self.mesh)\n # Compute normals and tangent space\n imesh = mesh.auto_normals(imesh)\n imesh = mesh.compute_tangents(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None,if_normal=False, mode = 'appearance_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material)\n return render.render_mesh(glctx, \n opt_mesh,\n target['mvp'],\n target['campos'],\n lgt,\n target['resolution'], \n spp=target['spp'], \n msaa=True,\n background= target['background'] ,\n bsdf= bsdf,\n if_normal=if_normal,\n normal_rotate=target['normal_rotate'], \n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal = if_normal, mode = mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]])\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z])\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n\n if iteration <= self.FLAGS.coarse_iter:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_early, guidance.max_step_early+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n else:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 
2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_late, guidance.max_step_late+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n\n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [1, 3, H, W]\n latents = guidance.encode_imgs(pred_rgb_512)\n \n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states= text_embeddings, index=indexs, came_posfeat=came_posfeat)#.sample######################\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond)\n \n if guidance.sds_weight_strategy == 0:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 1:\n w = 1 / (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 2:\n if iteration <= self.FLAGS.coarse_iter:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n else:\n w = 1 / (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w* (noise_pred -noise) \n grad = torch.nan_to_num(grad)\n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n \n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), (32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask2.shape, mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "obj", "path": "render/obj.py", "snippet": "def _find_mat(materials, name):\ndef load_obj(filename, clear_ks=True, mtl_override=None):\ndef 
write_obj(folder, mesh, save_material=True):" }, { "identifier": "material", "path": "render/material.py", "snippet": "class Material(torch.nn.Module):\n def __init__(self, mat_dict):\n def __contains__(self, key):\n def __getitem__(self, key):\n def __setitem__(self, key, val):\n def __delitem__(self, key):\n def keys(self):\ndef load_mtl(fn, clear_ks=True):\ndef save_mtl(fn, material):\ndef _upscale_replicate(x, full_res):\ndef merge_materials(materials, texcoords, tfaces, mfaces):" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B = y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "mesh", "path": "render/mesh.py", "snippet": "class Mesh:\n def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):\n def copy_none(self, other):\n def clone(self):\ndef load_mesh(filename, mtl_override=None):\ndef aabb(mesh):\ndef compute_edges(attr_idx, 
return_inverse=False):\ndef compute_edge_to_face_mapping(attr_idx, return_inverse=False):\ndef unit_size(mesh):\ndef center_by_reference(base_mesh, ref_aabb, scale):\ndef auto_normals(imesh):\ndef compute_tangents(imesh):" }, { "identifier": "texture", "path": "render/texture.py", "snippet": "class texture2d_mip(torch.autograd.Function):\nclass Texture2D(torch.nn.Module):\n def forward(ctx, texture):\n def backward(ctx, dout):\n def __init__(self, init, min_max=None):\n def sample(self, texc, texc_deriv, filter_mode='linear-mipmap-linear'):\n def getRes(self):\n def getChannels(self):\n def getMips(self):\n def clamp_(self):\n def normalize_(self):\ndef create_trainable(init, res=None, auto_mipmaps=True, min_max=None):\ndef srgb_to_rgb(texture):\ndef rgb_to_srgb(texture):\ndef _load_mip2D(fn, lambda_fn=None, channels=None):\ndef load_texture2D(fn, lambda_fn=None, channels=None):\ndef _save_mip2D(fn, mip, mipidx, lambda_fn):\ndef save_texture2D(fn, tex, lambda_fn=None):" }, { "identifier": "mlptexture", "path": "render/mlptexture.py", "snippet": "class _MLP(torch.nn.Module):\nclass MLPTexture3D(torch.nn.Module):\n def __init__(self, cfg, loss_scale=1.0):\n def forward(self, x):\n def _init_weights(m):\n def __init__(self, AABB, channels = 3, internal_dims = 32, hidden = 1, min_max = None):\n def sample(self, texc):\n def clamp_(self):\n def cleanup(self):" }, { "identifier": "light", "path": "render/light.py", "snippet": "class cubemap_mip(torch.autograd.Function):\nclass EnvironmentLight(torch.nn.Module):\n def forward(ctx, cubemap):\n def backward(ctx, dout):\n def __init__(self, base):\n def xfm(self, mtx):\n def clone(self):\n def clamp_(self, min=None, max=None):\n def get_mip(self, roughness):\n def build_mips(self, cutoff=0.99):\n def regularizer(self):\n def shade(self, gb_pos, gb_normal, kd, ks, view_pos, specular=True):\ndef _load_env_hdr(fn, scale=1.0):\ndef load_env(fn, scale=1.0):\ndef save_env_map(fn, light):\ndef create_trainable_env_rnd(base_res, scale=0.5, bias=0.25):\n LIGHT_MIN_RES = 16\n MIN_ROUGHNESS = 0.08\n MAX_ROUGHNESS = 0.5" }, { "identifier": "render", "path": "render/render.py", "snippet": "def interpolate(attr, rast, attr_idx, rast_db=None):\ndef shade(\n gb_pos,\n gb_geometric_normal,\n gb_normal,\n gb_tangent,\n gb_texc,\n gb_texc_deriv,\n view_pos,\n lgt,\n material,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_layer(\n rast,\n rast_deriv,\n mesh,\n view_pos,\n lgt,\n resolution,\n spp,\n msaa,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_mesh(\n ctx,\n mesh,\n mtx_in,\n view_pos,\n lgt,\n resolution,\n spp = 1,\n num_layers = 1,\n msaa = False,\n background = None, \n bsdf = None,\n if_normal = False,\n normal_rotate = None,\n mode = 'geometry_modeling',\n if_flip_the_normal = False,\n if_use_bump = False\n ):\n def prepare_input_vector(x):\n def composite_buffer(key, layers, background, antialias):\ndef render_uv(ctx, mesh, resolution, mlp_texture):\ndef uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):\ndef render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):" }, { "identifier": "StableDiffusion", "path": "sd_cglora.py", "snippet": "class StableDiffusion(nn.Module):\n def __init__(self, \n device, \n mode='geometry', \n text= '', \n add_directional_text= False, \n batch = 1, \n guidance_weight = 100, \n sds_weight_strategy = 0,\n early_time_step_range = [0.02, 0.5],\n late_time_step_range = [0.02, 0.5]):\n 
super().__init__()\n\n self.device = device\n self.mode = mode\n self.text= text\n self.add_directional_text = add_directional_text\n self.batch = batch \n print(f'[INFO] loading stable diffusion...')\n model_key = \"stabilityai/stable-diffusion-2-1-base\"\n self.vae = AutoencoderKL.from_pretrained(model_key, subfolder=\"vae\",torch_dtype=torch.float16).to(self.device)\n self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder=\"tokenizer\",torch_dtype=torch.float16)\n self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder=\"text_encoder\",torch_dtype=torch.float16).to(self.device)\n self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder=\"unet\",torch_dtype=torch.float16).to(self.device)\n if is_xformers_available():\n self.unet.enable_xformers_memory_efficient_attention()\n self.negative_text = ''\n if add_directional_text:\n self.text_z = []\n self.uncond_z = []\n self.index = []\n self.uncond_index = []\n for d in ['front', 'side', 'back', 'side']:\n text = f\"{self.text}, {d} view\"\n # text = f\"{d} view of {self.text}\"\n negative_text = f\"{self.negative_text}\"\n # if d == 'back': negative_text += \"face\"\n text_z, index = self.get_text_embeds([text], batch = 1)\n uncond_z, uncond_index =self.get_uncond_embeds([negative_text], batch = 1)\n self.text_z.append(text_z)\n self.uncond_z.append(uncond_z)\n self.index.append(index)\n self.uncond_index.append(uncond_index)\n self.text_z = torch.cat(self.text_z)\n self.uncond_z = torch.cat(self.uncond_z)\n self.index = torch.cat(self.index)\n self.uncond_index = torch.cat(self.uncond_index)\n else: \n self.text_z, self.index = self.get_text_embeds([self.text], batch = self.batch)\n self.uncond_z =self.get_uncond_embeds([self.negative_text], batch = self.batch)\n # del self.text_encoder\n self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_key, subfolder=\"scheduler\", torch_dtype=torch.float16)\n self.num_train_timesteps = self.scheduler.config.num_train_timesteps\n self.min_step_early = int(self.num_train_timesteps * early_time_step_range[0])\n self.max_step_early = int(self.num_train_timesteps * early_time_step_range[1])\n self.min_step_late = int(self.num_train_timesteps * late_time_step_range[0])\n self.max_step_late = int(self.num_train_timesteps * late_time_step_range[1])\n self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience\n self.guidance_weight = guidance_weight\n self.sds_weight_strategy = sds_weight_strategy\n print(f'[INFO] loaded stable diffusion!')\n\n for p in self.parameters():\n p.requires_grad_(False)\n self.unet_lora_params, self.names = inject_trainable_cglora(self.unet) # This will\n\n\n def get_text_embeds_global(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n \n global_embedding = text_embeddings[:,text_input['input_ids'].argmax(dim=-1),:].squeeze()\n \n return global_embedding\n\n\n def get_text_embeds(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n 
###################################################################\n index = text_input['input_ids'].argmax(dim=-1)\n #global_embedding = text_embeddings[:, index, :].squeeze()\n ##################################################################\n \n return text_embeddings, index\n \n def get_uncond_embeds(self, negative_prompt, batch):\n uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n \n if batch > 1:\n uncond_embeddings = uncond_embeddings.repeat(batch, 1, 1)\n ###################################################################\n index = uncond_input['input_ids'].argmax(dim=-1)\n # global_embedding = uncond_embeddings[:, index, :].squeeze()\n ##################################################################\n return uncond_embeddings,index\n\n def encode_imgs(self, imgs):\n # imgs: [B, 3, H, W]\n if self.mode == 'appearance_modeling':\n \n imgs = 2 * imgs - 1\n\n posterior = self.vae.encode(imgs).latent_dist\n latents = posterior.sample() * 0.18215\n\n return latents" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B 
= y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "Video", "path": "render/video.py", "snippet": "class Video():\n def __init__(self, path, name='video_log.mp4', mode='I', fps=30, codec='libx264', bitrate='16M') -> None:\n \n if path[-1] != \"/\":\n path += \"/\"\n \n self.writer = imageio.get_writer(path+name, mode=mode, fps=fps, codec=codec, bitrate=bitrate)\n \n def ready_image(self, image, write_video=True):\n # assuming channels last - as renderer returns it\n if len(image.shape) == 4: \n image = image.squeeze(0)[..., :3].detach().cpu().numpy()\n else:\n image = image[..., :3].detach().cpu().numpy()\n\n image = np.clip(np.rint(image*255.0), 0, 255).astype(np.uint8)\n\n if write_video:\n self.writer.append_data(image)\n\n return image\n\n def close(self):\n self.writer.close()" } ]
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
15,076
parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearing modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. 
FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! 
(geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' :
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad(): params = get_camera_params( resolution=512, fov=45, elev_angle=-20, azim_angle =rot_ang, ) rot_ang += 1 if FLAGS.mode =='geometry_modeling': buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='normal', if_use_bump = FLAGS.if_use_bump) video_image = (buffers['shaded'][0, ..., 0:3]+1)/2 else: buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='pbr', if_use_bump = FLAGS.if_use_bump) video_image = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) video_image = video.ready_image(video_image) iter_start_time = time.time() if FLAGS.mode =='geometry_modeling': if it<=400: if_pretrain = True else: if_pretrain = False if_normal =True else: if_pretrain = False if_normal = False with torch.cuda.amp.autocast(enabled= True): if if_pretrain== True: reg_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices = scene_and_vertices) img_loss = 0 sds_loss = 0 attention_loss = 0 if if_pretrain == False: sds_loss, img_loss, reg_loss, attention_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices =None) if FLAGS.mode =='geometry_modeling': if(it<1000): attention_loss = 0 else: if(it<500): attention_loss = 0 # ============================================================================================== # Final loss # ============================================================================================== total_loss = img_loss + reg_loss + sds_loss + attention_loss if if_pretrain == True: scaler.scale(total_loss).backward() if if_pretrain == False: scaler.scale(total_loss).backward() img_loss_vec.append(img_loss.item()) reg_loss_vec.append(reg_loss.item()) # ============================================================================================== # Backpropagate # ============================================================================================== if if_normal == False and if_pretrain == False: scaler.step(optimizer) optimizer.zero_grad() if if_normal == True or if_pretrain == True: if optimize_geometry: scaler.step(optimizer_mesh) optimizer_mesh.zero_grad() for param in guidance.parameters(): if param.grad is not None and torch.isnan(param.grad).any(): param.grad = torch.nan_to_num(param.grad, nan=0.0) max_norm = 5.0 torch.nn.utils.clip_grad_norm_(guidance.parameters(), max_norm) if if_pretrain == False: optimizer_lora.step() optimizer_lora.zero_grad() for param in guidance.parameters(): param.data = torch.nan_to_num(param.data, nan=0.0, posinf=None, neginf=None) scaler.update() # ============================================================================================== # Clamp trainables to reasonable range # ============================================================================================== with torch.no_grad(): if 'kd' in opt_material: opt_material['kd'].clamp_() if 'ks' in opt_material: opt_material['ks'].clamp_() if 'normal' in opt_material: opt_material['normal'].clamp_() opt_material['normal'].normalize_() if lgt is not None: lgt.clamp_(min=0.0) torch.cuda.current_stream().synchronize() iter_dur_vec.append(time.time() - 
iter_start_time) return geometry, opt_material def seed_everything(seed, local_rank): random.seed(seed + local_rank) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed + local_rank) torch.manual_seed(seed) torch.cuda.manual_seed(seed) if __name__ == "__main__": parser = argparse.ArgumentParser(description='nvdiffrec') parser.add_argument('--config', type=str, default='configs_clean3/icecream_geometry_debug.json', help='Config file') parser.add_argument('-i', '--iter', type=int, default=5000) parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-s', '--spp', type=int, default=1) parser.add_argument('-l', '--layers', type=int, default=1) parser.add_argument('-r', '--train-res', nargs=2, type=int, default=[512, 512]) parser.add_argument('-dr', '--display-res', type=int, default=None) parser.add_argument('-tr', '--texture-res', nargs=2, type=int, default=[1024, 1024]) parser.add_argument('-si', '--save-interval', type=int, default=1000, help="The interval of saving an image") parser.add_argument('-vi', '--video_interval', type=int, default=10, help="The interval of saving a frame of the video") parser.add_argument('-mr', '--min-roughness', type=float, default=0.08) parser.add_argument('-mip', '--custom-mip', action='store_true', default=False) parser.add_argument('-rt', '--random-textures', action='store_true', default=False) parser.add_argument('-bg', '--train_background', default='black', choices=['black', 'white', 'checker', 'reference']) parser.add_argument('-o', '--out-dir', type=str, default='results/result_debug/icecream_geometry') parser.add_argument('-rm', '--ref_mesh', type=str) parser.add_argument('-bm', '--base-mesh', type=str, default=None) parser.add_argument('--validate', type=bool, default=True) parser.add_argument("--local_rank", type=int, default=0, help="For distributed training: local_rank") parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument("--add_directional_text", action='store_true', default=False) parser.add_argument('--mode', default='geometry_modeling', choices=['geometry_modeling', 'appearance_modeling']) parser.add_argument('--text', default=None, help="text prompt") parser.add_argument('--sdf_init_shape', default='ellipsoid', choices=['ellipsoid', 'cylinder', 'custom_mesh']) parser.add_argument('--camera_random_jitter', type= float, default=0.4, help="A large value is advantageous for the extension of objects such as ears or sharp corners to grow.") parser.add_argument('--fovy_range', nargs=2, type=float, default=[25.71, 45.00]) parser.add_argument('--elevation_range', nargs=2, type=int, default=[-10, 45], help="The elevatioin range must in [-90, 90].") parser.add_argument("--guidance_weight", type=int, default=100, help="The weight of classifier-free guidance") parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, 
nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearing modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = 
light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! (geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' :
geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS)
2
2023-11-27 13:44:01+00:00
24k
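The optimization loop in the training script above guards the guidance (LoRA) parameters by zeroing NaN gradients, clipping the global gradient norm to 5.0, and scrubbing NaN values from the weights after the step. A minimal, self-contained sketch of that safeguard pattern follows; the toy linear model, random batch, and SGD learning rate are illustrative assumptions, while the nan_to_num scrubbing, the max_norm of 5.0, and the clip-before-step ordering mirror the loop above.

import torch

# Toy stand-in for the guidance/LoRA parameters (assumption: any nn.Module behaves the same here).
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-5)

x = torch.randn(8, 4)
loss = model(x).pow(2).mean()
loss.backward()

# Replace NaN gradients with zeros so a single bad step cannot poison the parameters.
for param in model.parameters():
    if param.grad is not None and torch.isnan(param.grad).any():
        param.grad = torch.nan_to_num(param.grad, nan=0.0)

# Clip the global gradient norm before stepping (5.0 matches max_norm in the loop above).
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)

optimizer.step()
optimizer.zero_grad()

# Final safety net: scrub NaNs from the parameters themselves after the update.
for param in model.parameters():
    param.data = torch.nan_to_num(param.data, nan=0.0)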
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.controlnet import ControlNetModel
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
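get_context_scheduler and get_total_steps, excerpted in the snippet list above and imported here, only expose a counting contract: the scheduler yields lists of frame indices per denoising step, and get_total_steps sums how many such windows are produced. The actual "uniform" policy is not part of the excerpt, so the generator below is an assumed stand-in, a plain sliding window with wrap-around overlap, used only to illustrate how that count comes about.

from typing import Iterator, List, Optional

def sliding_windows(step: int,
                    num_steps: Optional[int],
                    num_frames: int,
                    context_size: Optional[int] = 16,
                    context_stride: int = 3,
                    context_overlap: int = 4) -> Iterator[List[int]]:
    # Hypothetical stand-in for the context scheduler: fixed-size windows of
    # frame indices that overlap by context_overlap frames and wrap around the
    # clip. step, num_steps and context_stride mirror the real call signature
    # but are ignored in this sketch.
    context_size = context_size or 16
    if num_frames <= context_size:
        yield list(range(num_frames))
        return
    stride = context_size - context_overlap
    for start in range(0, num_frames - context_overlap, stride):
        yield [(start + i) % num_frames for i in range(context_size)]

# The same sum-over-timesteps counting pattern as get_total_steps:
timesteps = list(range(25))  # e.g. 25 DDIM steps
total = sum(
    len(list(sliding_windows(i, None, num_frames=32)))
    for i in range(len(timesteps))
)
print(total)  # 75: three windows per step for 32 frames, over 25 steps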
16,750
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
controlnet: ControlNetModel,
1
2023-12-04 20:47:34+00:00
24k
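The row that just ended pairs retrieved cross-file snippets and an import block with an in-file prefix that stops at "unet: UNet3DConditionModel,"; the standalone value "controlnet: ControlNetModel," appears to be the expected continuation, and the trailing "1" an index into the snippet list. A hypothetical consumer of such a row is sketched below to make that pairing concrete; the dataclass and field names are illustrative and not taken from any published loader.

from dataclasses import dataclass
from typing import List

@dataclass
class ContextSnippet:
    identifier: str   # e.g. "ControlNetModel"
    path: str         # file the snippet was retrieved from
    snippet: str      # the retrieved source text

@dataclass
class CompletionExample:
    context: List[ContextSnippet]  # cross-file snippets shown before the prefix
    cropped_code: str              # in-file prefix ending mid-signature
    next_line: str                 # expected continuation of the prefix
    gold_snippet_index: int        # which snippet is the relevant one

def build_prompt(ex: CompletionExample) -> str:
    # Concatenate retrieved snippets (tagged with their paths) ahead of the prefix.
    ctx = "\n".join("# " + s.path + "\n" + s.snippet for s in ex.context)
    return ctx + "\n\n" + ex.cropped_code

def first_line_match(ex: CompletionExample, generated: str) -> bool:
    # Score a model continuation by exact match on its first line.
    lines = generated.splitlines()
    return bool(lines) and lines[0].strip() == ex.next_line.strip()

example = CompletionExample(
    context=[ContextSnippet("ControlNetModel",
                            "magicanimate/models/controlnet.py", "...")],
    cropped_code="def __init__(self, vae, text_encoder, tokenizer, unet,",
    next_line="controlnet: ControlNetModel,",
    gold_snippet_index=0,
)
print(first_line_match(example, "controlnet: ControlNetModel,\n    ..."))  # True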
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
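A minimal offline sketch of the two helpers quoted above, ``parse_url`` and ``Retry``. The URL and retry settings are invented for illustration, and the values in the comments follow the docstrings rather than a captured run.

from urllib3.util.retry import Retry
from urllib3.util.url import parse_url

u = parse_url("https://user:pw@example.com:8443/mail/?q=1#top")
print(u.scheme, u.host, u.port)   # https example.com 8443

retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[503])
# Per the docstring, the sleep between attempts is
#   backoff_factor * (2 ** (consecutive_errors - 1)),
# i.e. 0.0s, 1.0s, 2.0s, 4.0s, ... capped at Retry.DEFAULT_BACKOFF_MAX.
print(retries.get_backoff_time())  # 0 -- no failed attempts recorded yet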
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14,704
""" with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. 
""" if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None
kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change()
0
2023-11-27 07:01:39+00:00
24k
NobiDeveloper/Nobita-Filter-Bot
plugins/commands.py
[ { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\n START_TXT = \"\"\"\n<b>{},\n\nɪ ᴄᴀɴ ᴘʀᴏᴠɪᴅᴇ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs,\nᴊᴜsᴛ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴀɴᴅ ᴇɴᴊᴏʏ 😍\n\n💞 ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href='https://telegram.me/MovieVillaYT'>ᴍᴏᴠɪᴇ ᴠɪʟʟᴀ</a></b>\n\"\"\"\n\n HELP_TXT = \"\"\"\n<b>{},\n\n/g_info - ᴛᴏ ᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴠᴀʟᴜᴇꜱ\n/set_tutorial - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ᴛᴜᴛᴏʀɪᴀʟ\n/set_shortlink - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ꜱʜᴏʀᴛᴇɴᴇʀ\n/rem_tutorial - ᴛᴏ ʀᴇᴍᴏᴠᴇ ᴛᴜᴛᴏʀɪᴀʟ ʟɪɴᴋ\n</b>\"\"\"\n\n ABOUT_TXT = \"\"\"<b>➣ ᴍʏ ɴᴀᴍᴇ ⋟</b> {}\n<b>➢ ᴄʀᴇᴀᴛᴏʀ ⋟</b> <a href=https://youtube.com/@NobiDeveloper>𝘔𝘖𝘝𝘐𝘌 𝘝𝘐𝘓𝘓𝘈</a>\n<b>➣ ʟɪʙʀᴀʀʏ ⋟</b> 𝘱𝘺𝘳𝘰𝘨𝘳𝘢𝘮\n<b>➢ ʟᴀɴɢᴜᴀɢᴇ ⋟</b> 𝘱𝘺𝘵𝘩𝘰𝘯 3\n<b>➣ ᴅᴀᴛᴀʙᴀsᴇ ⋟</b> 𝘮𝘰𝘯𝘨𝘰 𝘥𝘣\n<b>➢ ʙᴏᴛ sᴇʀᴠᴇʀ ⋟</b> 𝘩𝘦𝘳𝘰𝘬𝘶\n<b>➣ ʙᴜɪʟᴅ sᴛᴀᴛs ⋟</b> 𝘷2.0.1 ﹝ʙᴇᴛᴀ﹞\"\"\"\n\n SOURCE_TXT = \"\"\"\n<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.</b>\n\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪʙʙᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ᴏʀ ʙʏ ᴀɴʏ ᴍᴇᴀɴꜱ, ꜱʜᴀʀᴇ, ᴏʀ ᴄᴏɴꜱᴜᴍᴇ, ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\n\n<b><a href=https://telegram.me/NobiDeveloper>~ ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ @MovieVillaYT</a></b>\n\"\"\"\n\n MANUELFILTER_TXT = \"\"\"\n<b>{},\n\n~ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ꜰᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\n~ ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇs sᴇᴛᴛɪɴɢs.\n\n~ ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʏᴏᴜ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\nᴄᴏᴍᴍᴀɴᴅs ᴀɴᴅ ᴜsᴀɢᴇ -\n\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ.</b>\n\"\"\"\n\n GROUP_TXT = \"\"\"\n<b>⍟ ᴄʜᴀɴɴᴇʟs ᴀɴᴅ ɢʀᴏᴜᴘs ᴍᴏᴅᴜʟᴇ ⍟</b>\n\n<b>🍿 ᴍᴏᴠɪᴇꜱ ᴄʜᴀɴɴᴇʟ.\n🗣️ ʙᴏᴛ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ.\n🚦 ʙᴏᴛ ᴜᴘᴅᴀᴛᴇs ᴄʜᴀɴɴᴇʟ.\n🎬 ᴍᴏᴠɪᴇ ʀᴇǫᴜᴇsᴛɪɴɢ ɢʀᴏᴜᴘ.</b>\"\"\"\n\n BUTTON_TXT = \"\"\"\n<b>💵 ɪ ʀᴇǫᴜᴇsᴛᴇᴅ ᴛᴏ ʏᴏᴜ 💸\n\nᴘʟᴇᴀsᴇ ᴅᴏɴᴀᴛᴇ ᴛʜᴇ ᴅᴇᴠᴇʟᴏᴘᴇʀ ꜰᴏʀ ᴋᴇᴇᴘɪɴɢ ᴛʜᴇ sᴇʀᴠɪᴄᴇ ᴀʟɪᴠᴇ & ᴋᴇᴇᴘ ʙʀɪɴɢɪɴɢ ᴍᴏʀᴇ ɴᴇᴡ ꜰᴇᴀᴛᴜʀᴇs ꜰᴏʀ ʏᴏᴜ....</b>\n\n𝐘𝐨𝐮 𝐂𝐚𝐧 𝐃𝐨𝐧𝐚𝐭𝐞 𝐀𝐧𝐲 𝐀𝐦𝐨𝐮𝐧𝐭 𝐘𝐨𝐮 𝐇𝐚𝐯𝐞 💷\n\n<b>᚜ ᴘᴀʏᴍᴇɴᴛ ᴍᴇᴛʜᴏᴅs ᚛</b>\n\n💵 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗚𝗼𝗼𝗴𝗹𝗲 𝗣𝗮𝘆</a>\n💸 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗮𝘆𝘁𝗺</a>\n💶 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗵𝗼𝗻𝗲𝗣𝗲</a>\n\n𝐂𝐨𝐧𝐭𝐚𝐜𝐭 𝐌𝐞 𝐅𝐨𝐫 𝐊𝐧𝐨𝐰 𝐀𝐛𝐨𝐮𝐭 𝐓𝐡𝐞 𝐏𝐚𝐲𝐦𝐞𝐧𝐭 𝐈𝐧𝐟𝐨\n\n<b>ᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a>\nᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a></b>\"\"\"\n\n AUTOFILTER_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ</b>\n<b>ɴᴏᴛᴇ: Fɪʟᴇ Iɴᴅᴇx</b>\n1. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\n2. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\n3. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ Qᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\n\n<b>Nᴏᴛᴇ: AᴜᴛᴏFɪʟᴛᴇʀ</b>\n1. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n2. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\n3. Usᴇ /settings ᴏɴ ʙᴏᴛ's PM ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\n\n CONNECTION_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\n- ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \n- ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\n<b>ɴᴏᴛᴇ:</b>\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\n2. 
ꜱᴇɴᴅ <code>/ᴄᴏɴɴᴇᴄᴛ</code> ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /connect - <code>ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ</code>\n• /disconnect - <code>ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ</code>\n• /connections - <code>ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</code>\"\"\"\n\n EXTRAMOD_TXT = \"\"\"ʜᴇʟᴘ: Exᴛʀᴀ Mᴏᴅᴜʟᴇs\n<b>ɴᴏᴛᴇ:</b>\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /id - <code>ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.</code>\n• /info - <code>ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.</code>\n• /imdb - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.</code>\n• /search - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.</code>\"\"\"\n\n ADMIN_TXT = \"\"\"ʜᴇʟᴘ: Aᴅᴍɪɴ Mᴏᴅs\n<b>ɴᴏᴛᴇ:</b>\nTʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /logs - <code>ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ</code>\n• /stats - <code>ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delete - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.</code>\n• /users - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.</code>\n• /chats - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ</code>\n• /leave - <code>ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.</code>\n• /disable - <code>ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.</code>\n• /ban - <code>ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /unban - <code>ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /channel - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ</code>\n• /broadcast - <code>ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ</code>\n• /grp_broadcast - <code>Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.</code>\n• /gfilter - <code>ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /gfilters - <code>ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /delg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ</code>\n• /request - <code>Tᴏ sᴇɴᴅ ᴀ Mᴏᴠɪᴇ/Sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. Oɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delallg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ Gғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\n• /deletefiles - <code>Tᴏ ᴅᴇʟᴇᴛᴇ CᴀᴍRɪᴘ ᴀɴᴅ PʀᴇDVD Fɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\"\"\"\n\n STATUS_TXT = \"\"\"<b>📂 ᴛᴏᴛᴀʟ ꜰɪʟᴇs: <code>{}</code>\n👤 ᴛᴏᴛᴀʟ ᴜsᴇʀs: <code>{}</code>\n♻️ ᴛᴏᴛᴀʟ ᴄʜᴀᴛs: <code>{}</code>\n🗃️ ᴜsᴇᴅ sᴛᴏʀᴀɢᴇ: <code>{}</code>\n🆓 ꜰʀᴇᴇ sᴛᴏʀᴀɢᴇ: <code>{}</code></b>\"\"\"\n\n LOG_TEXT_G = \"\"\"#𝐍𝐞𝐰𝐆𝐫𝐨𝐮𝐩\n\n<b>᚛› 𝐆𝐫𝐨𝐮𝐩 ⪼ {}(<code>{}</code>)</b>\n<b>᚛› 𝐓𝐨𝐭𝐚𝐥 𝐌𝐞𝐦𝐛𝐞𝐫𝐬 ⪼ <code>{}</code></b>\n<b>᚛› 𝐀𝐝𝐝𝐞𝐝 𝐁𝐲 ⪼ {}</b>\n\"\"\"\n\n LOG_TEXT_P = \"\"\"#𝐍𝐞𝐰𝐔𝐬𝐞𝐫\n\n<b>᚛› 𝐈𝐃 - <code>{}</code></b>\n<b>᚛› 𝐍𝐚𝐦𝐞 - {}</b>\n\"\"\"\n\n ALRT_TXT = \"\"\"{},\nᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴏᴡɴ ʀᴇǫᴜᴇ𝗌ᴛ 😤\n\"\"\"\n\n OLD_ALRT_TXT =\"\"\"{},\n\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇ,\n\nꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇ𝗌ᴛ ᴀɢᴀɪɴ 😊\n\"\"\"\n\n CUDNT_FND = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆𝘁𝗵𝗶𝗻𝗴 𝗿𝗲𝗹𝗮𝘁𝗲𝗱 𝘁𝗼 𝘁𝗵𝗮𝘁 𝗱𝗶𝗱 𝘆𝗼𝘂 𝗺𝗲𝗮𝗻 𝗮𝗻𝘆 𝗼𝗻𝗲 𝗼𝗳 𝘁𝗵𝗲𝘀𝗲 ?? 👇\"\"\"\n\n I_CUDNT = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆 𝗺𝗼𝘃𝗶𝗲 𝗼𝗿 𝘀𝗲𝗿𝗶𝗲𝘀 𝗶𝗻 𝘁𝗵𝗮𝘁 𝗻𝗮𝗺𝗲.. 
😐\"\"\"\n\n I_CUD_NT = \"\"\"ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...\"\"\"\n\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ...\n\n<u>ʀᴇᴀꜱᴏɴꜱ:</u></b>\n\n𝟷) ꜱᴘᴇʟʟɪɴɢ ᴍɪꜱᴛᴀᴋᴇ\n\n𝟸) ᴏᴛᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀꜱᴇᴅ\n\n𝟹) ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ\n\n<b><a href=https://telegram.me/NobiDeveloperr>~ ʀᴇǫᴜᴇ𝗌ᴛ ᴛᴏ ᴏᴡɴᴇʀ</a></b>\n\"\"\"\n\n TOP_ALRT_MSG = \"\"\"ꜱᴇᴀʀᴄʜɪɴɢ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...\"\"\"\n\n MELCOW_ENG = \"\"\"<b>{},\n\n📿 ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ᴏᴜʀ ɢʀᴏᴜᴘ {}\n\n🚬 ᴛʜɪs ɪs ᴀ ᴍᴏᴠɪᴇ ɢʀᴏᴜᴘ\n\n⏳ ᴀʟʟ ᴄᴀᴛᴇɢᴏʀɪᴇs ᴏꜰ ᴍᴏᴠɪᴇs ᴀᴠᴀɪʟᴀʙʟᴇ ʜᴇʀᴇ\n\n🧨 ᴊᴜsᴛ ᴛʏᴘᴇ ᴛʜᴇ ᴍᴏᴠɪᴇ ɴᴀᴍᴇ\n\n🤖 ʙᴏᴛ ᴡɪʟʟ sᴇɴᴅ ʏᴏᴜʀ ᴍᴏᴠɪᴇ\n\n☎️ ʀᴇᴀᴅ ɢʀᴏᴜᴘ ʀᴜʟᴇs ᴛᴏ ᴋɴᴏᴡ ᴍᴏʀᴇ...</b>\"\"\"\n\n SHORTLINK_INFO = \"\"\"\n<b>──────「 <a href='https://telegram.me/NobiDeveloper'>ᴇᴀʀɴ ᴍᴏɴᴇʏ</a> 」──────\n\n➥ ɴᴏᴡ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴇᴀʀɴ ʟᴏᴛs ᴏꜰ ᴍᴏɴᴇʏ ꜰʀᴏᴍ ᴛʜɪꜱ ʙᴏᴛ.\n\n›› sᴛᴇᴘ 𝟷 : ʏᴏᴜ ᴍᴜsᴛ ʜᴀᴠᴇ ᴀᴛʟᴇᴀsᴛ ᴏɴᴇ ɢʀᴏᴜᴘ ᴡɪᴛʜ ᴍɪɴɪᴍᴜᴍ 𝟹𝟶𝟶 ᴍᴇᴍʙᴇʀs.\n\n›› sᴛᴇᴘ 𝟸 : ᴍᴀᴋᴇ ᴀᴄᴄᴏᴜɴᴛ ᴏɴ <a href='https://tnshort.net/ref/devilofficial'>ᴛɴʟɪɴᴋ</a> ᴏʀ <a href='https://onepagelink.in/ref/Nobita'>ᴏɴᴇᴘᴀɢᴇʟɪɴᴋ</a>. [ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴜsᴇ ᴏᴛʜᴇʀ sʜᴏʀᴛɴᴇʀ ᴡᴇʙsɪᴛᴇ ]\n\n›› sᴛᴇᴘ 𝟹 : ꜰᴏʟʟᴏᴡ ᴛʜᴇsᴇ <a href='https://telegram.me/NobiDeveloper/1063'>ɪɴꜱᴛʀᴜᴄᴛɪᴏɴꜱ</a>.\n\n➥ ᴛʜɪꜱ ʙᴏᴛ ꜰʀᴇᴇ ꜰᴏʀ ᴀʟʟ ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ʙᴏᴛ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘs ꜰʀᴇᴇ ᴏꜰ ᴄᴏꜱᴛ.</b>\"\"\"\n\n REQINFO = \"\"\"\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\n\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\n\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\n\n SELECT = \"\"\"\nMOVIES ➢ Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\"\n\nSERIES ➢ Sᴇʟᴇᴄᴛ \"Sᴇᴀsᴏɴs\"\n\nTɪᴘ: Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\" ᴏʀ \"Sᴇᴀsᴏɴs\" Bᴜᴛᴛᴏɴ ᴀɴᴅ Cʟɪᴄᴋ \"Sᴇɴᴅ Aʟʟ\" Tᴏ ɢᴇᴛ Aʟʟ Fɪʟᴇ Lɪɴᴋs ɪɴ ᴀ Sɪɴɢʟᴇ ᴄʟɪᴄᴋ\"\"\"\n\n SINFO = \"\"\"\n▣ ᴛɪᴘs ▣\n\n☆ ᴛʏᴘᴇ ᴄᴏʀʀᴇᴄᴛ sᴘᴇʟʟɪɴɢ (ɢᴏᴏɢʟᴇ)\n\n☆ ɪꜰ ʏᴏᴜ ɴᴏᴛ ɢᴇᴛ ʏᴏᴜʀ ꜰɪʟᴇ ɪɴ ᴛʜɪꜱ ᴘᴀɢᴇ ᴛʜᴇɴ ᴄʟɪᴄᴋ ᴏɴ ɴᴇxᴛ ʙᴜᴛᴛᴏɴ\n\n☆ ᴄᴏɴᴛɪɴᴜᴇ ᴛʜɪs ᴍᴇᴛʜᴏᴅ ᴛᴏ ɢᴇᴛᴛɪɴɢ ʏᴏᴜ ꜰɪʟᴇ\n\n❤️‍🔥 ᴘᴏᴡᴇʀᴇᴅ ʙʏ @NobiDeveloper\n\"\"\"\n\n NORSLTS = \"\"\"\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\n\n𝗜𝗗 <b>: {}</b>\n𝗡𝗮𝗺𝗲 <b>: {}</b>\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>\"\"\"\n\n CAPTION = \"\"\"\n[{file_name}](https://telegram.me/NobiDeveloper)\n\n<b>•────•────────•────•\n📌 ʀᴇǫᴜᴇsᴛ ɢʀᴏᴜᴘ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/AllRequestGroups)\n🎬 ᴍᴏᴠɪᴇs ᴄʜᴀɴɴᴇʟ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/MovieVillaYT)\n•────•────────•────•\n\n©️ ᴘᴏᴡᴇʀᴇᴅ ʙʏ : [ᴍᴏᴠɪᴇ ᴠɪʟʟᴀ](https://youtube.com/@NobiDeveloper)</b>\"\"\"\n\n IMDB_TEMPLATE_TXT = \"\"\"\n<b>{title}</b>\n\n⭐️<b>{rating}</b> | ⏰ <b>{runtime}</b> | 📆 <b>{release_date}</b>\n\n● <b>{genres}</b>\n● <b>{languages}</b>\n\n📖 sᴛᴏʀʏ : <b>{plot}</b> \n\n© {message.chat.title}\n\"\"\"\n \n ALL_FILTERS = \"\"\"\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\n \n GFILTER_TXT = \"\"\"\n<b>Wᴇʟᴄᴏᴍᴇ ᴛᴏ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs. 
Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.</b>\n \nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /gfilter - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /gfilters - <code>Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.</code>\n• /delg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /delallg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.</code>\"\"\"\n \n FILE_STORE_TXT = \"\"\"\n<b>Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</b>\n\nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /batch - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</code>\n• /link - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.</code>\n• /pbatch - <code>Jᴜsᴛ ʟɪᴋᴇ /batch, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.</code>\n• /plink - <code>Jᴜsᴛ ʟɪᴋᴇ /link, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.</code>\"\"\"\n\n RESTART_TXT = \"\"\"\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\n\n📅 Dᴀᴛᴇ : <code>{}</code>\n⏰ Tɪᴍᴇ : <code>{}</code>\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>v2.7.1 [ Sᴛᴀʙʟᴇ ]</code></b>\n\"\"\"\n\n LOGO = \"\"\"\n𝑺𝒕𝒂𝒓𝒕𝒊𝒏𝒈.......🥵\"\"\"" }, { "identifier": "Media", "path": "database/ia_filterdb.py", "snippet": "class Media(Document):\n file_id = fields.StrField(attribute='_id')\n file_ref = fields.StrField(allow_none=True)\n file_name = fields.StrField(required=True)\n file_size = fields.IntField(required=True)\n file_type = fields.StrField(allow_none=True)\n mime_type = fields.StrField(allow_none=True)\n caption = fields.StrField(allow_none=True)\n\n class Meta:\n indexes = ('$file_name', )\n collection_name = COLLECTION_NAME" }, { "identifier": "get_file_details", "path": "database/ia_filterdb.py", "snippet": "async def get_file_details(query):\n filter = {'file_id': query}\n cursor = Media.find(filter)\n filedetails = await cursor.to_list(length=1)\n return filedetails" }, { "identifier": "unpack_new_file_id", "path": "database/ia_filterdb.py", "snippet": "def unpack_new_file_id(new_file_id):\n \"\"\"Return file_id, file_ref\"\"\"\n decoded = FileId.decode(new_file_id)\n file_id = encode_file_id(\n pack(\n \"<iiqq\",\n int(decoded.file_type),\n decoded.dc_id,\n decoded.media_id,\n decoded.access_hash\n )\n )\n file_ref = encode_file_ref(decoded.file_reference)\n return file_id, file_ref" }, { "identifier": "get_bad_files", "path": "database/ia_filterdb.py", "snippet": "async def get_bad_files(query, file_type=None, filter=False):\n \"\"\"For given query return (results, next_offset)\"\"\"\n query = query.strip()\n #if filter:\n #better ?\n #query = query.replace(' ', r'(\\s|\\.|\\+|\\-|_)')\n #raw_pattern = r'(\\s|_|\\-|\\.|\\+)' + query + r'(\\s|_|\\-|\\.|\\+)'\n if not query:\n raw_pattern = '.'\n elif ' ' not in query:\n raw_pattern = r'(\\b|[\\.\\+\\-_])' + query + r'(\\b|[\\.\\+\\-_])'\n else:\n raw_pattern = query.replace(' ', r'.*[\\s\\.\\+\\-_]')\n \n try:\n regex = re.compile(raw_pattern, flags=re.IGNORECASE)\n except:\n return []\n\n if USE_CAPTION_FILTER:\n filter = {'$or': [{'file_name': regex}, {'caption': regex}]}\n else:\n filter = {'file_name': regex}\n\n if file_type:\n filter['file_type'] = file_type\n\n total_results = await Media.count_documents(filter)\n\n cursor = Media.find(filter)\n # Sort by recent\n cursor.sort('$natural', -1)\n # Get list of files\n files = await cursor.to_list(length=total_results)\n\n return files, total_results" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def 
add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" }, { "identifier": "CHANNELS", "path": "info.py", "snippet": "CHANNELS = [int(ch) if id_pattern.search(ch) else ch for ch in environ.get('CHANNELS', '').split()]" }, { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "AUTH_CHANNEL", "path": "info.py", "snippet": "AUTH_CHANNEL = int(auth_channel) if auth_channel and id_pattern.search(auth_channel) else None" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "PICS", "path": "info.py", "snippet": "PICS = (environ.get('PICS', 'https://telegra.ph/file/61ef9818986cef9554017.jpg https://telegra.ph/file/4696ff67a5bae3ea92c14.jpg')).split()" }, { "identifier": "BATCH_FILE_CAPTION", "path": "info.py", "snippet": "BATCH_FILE_CAPTION = environ.get(\"BATCH_FILE_CAPTION\", CUSTOM_FILE_CAPTION)" }, { "identifier": "CUSTOM_FILE_CAPTION", "path": "info.py", "snippet": "CUSTOM_FILE_CAPTION = environ.get(\"CUSTOM_FILE_CAPTION\", f\"{script.CAPTION}\")" }, { "identifier": "PROTECT_CONTENT", "path": "info.py", "snippet": "PROTECT_CONTENT = is_enabled((environ.get('PROTECT_CONTENT', \"False\")), False)" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://telegram.me/NobiDeveloper')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://telegram.me/NobiDeveloperSupport')" }, { "identifier": "REQST_CHANNEL", "path": "info.py", "snippet": "REQST_CHANNEL = int(reqst_channel) if reqst_channel and id_pattern.search(reqst_channel) else None" }, { "identifier": "SUPPORT_CHAT_ID", "path": "info.py", "snippet": "SUPPORT_CHAT_ID = int(support_chat_id) if support_chat_id and id_pattern.search(support_chat_id) else None" }, { "identifier": "SUPPORT_CHAT", "path": "info.py", "snippet": "SUPPORT_CHAT = environ.get('SUPPORT_CHAT', 'NobiDeveloperSupport')" }, { "identifier": "MAX_B_TN", "path": "info.py", "snippet": "MAX_B_TN = environ.get(\"MAX_B_TN\", \"8\")" }, { "identifier": "VERIFY", "path": "info.py", "snippet": "VERIFY = bool(environ.get('VERIFY', False))" }, { "identifier": "SHORTLINK_API", "path": "info.py", "snippet": "SHORTLINK_API = environ.get('SHORTLINK_API', '8c09653e5c38f84d1b76ad3197c5a023e53b494d')" }, { "identifier": "SHORTLINK_URL", "path": "info.py", "snippet": "SHORTLINK_URL = environ.get('SHORTLINK_URL', 'onepagelink.in')" }, { "identifier": "TUTORIAL", "path": "info.py", "snippet": "TUTORIAL = environ.get('TUTORIAL', 'https://youtu.be/0c-i2Lol6LU')" }, { "identifier": "IS_TUTORIAL", "path": "info.py", "snippet": "IS_TUTORIAL = bool(environ.get('IS_TUTORIAL', True))" }, { "identifier": "STICKERS", "path": "info.py", "snippet": "STICKERS = 
(environ.get('STICKERS', 'CAACAgUAAxkBAAEKk8BlNPrdTHUdjCkHswRS7FEGD57bQgADDQACryfoV7k_sTsjJTYAATAE CAACAgUAAxkBAAEKk75lNPrc-rw4n-xEqmgMA14lO_lzMQACLwcAApzI6VfWL2jjZeNSATAE')).split()" }, { "identifier": "get_settings", "path": "utils.py", "snippet": "async def get_settings(group_id):\n settings = temp.SETTINGS.get(group_id)\n if not settings:\n settings = await db.get_settings(group_id)\n temp.SETTINGS[group_id] = settings\n return settings" }, { "identifier": "get_size", "path": "utils.py", "snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])" }, { "identifier": "is_subscribed", "path": "utils.py", "snippet": "async def is_subscribed(bot, query):\n try:\n user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)\n except UserNotParticipant:\n pass\n except Exception as e:\n logger.exception(e)\n else:\n if user.status != enums.ChatMemberStatus.BANNED:\n return True\n\n return False" }, { "identifier": "save_group_settings", "path": "utils.py", "snippet": "async def save_group_settings(group_id, key, value):\n current = await get_settings(group_id)\n current[key] = value\n temp.SETTINGS[group_id] = current\n await db.update_settings(group_id, current)" }, { "identifier": "temp", "path": "utils.py", "snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}" }, { "identifier": "verify_user", "path": "utils.py", "snippet": "async def verify_user(bot, userid, token):\n user = await bot.get_users(userid)\n if not await db.is_user_exist(user.id):\n await db.add_user(user.id, user.first_name)\n await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(user.id, user.mention))\n TOKENS[user.id] = {token: True}\n tz = pytz.timezone('Asia/Kolkata')\n today = date.today()\n VERIFIED[user.id] = str(today)" }, { "identifier": "check_token", "path": "utils.py", "snippet": "async def check_token(bot, userid, token):\n user = await bot.get_users(userid)\n if not await db.is_user_exist(user.id):\n await db.add_user(user.id, user.first_name)\n await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(user.id, user.mention))\n if user.id in TOKENS.keys():\n TKN = TOKENS[user.id]\n if token in TKN.keys():\n is_used = TKN[token]\n if is_used == True:\n return False\n else:\n return True\n else:\n return False" }, { "identifier": "check_verification", "path": "utils.py", "snippet": "async def check_verification(bot, userid):\n user = await bot.get_users(userid)\n if not await db.is_user_exist(user.id):\n await db.add_user(user.id, user.first_name)\n await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(user.id, user.mention))\n tz = pytz.timezone('Asia/Kolkata')\n today = date.today()\n if user.id in VERIFIED.keys():\n EXP = VERIFIED[user.id]\n years, month, day = EXP.split('-')\n comp = date(int(years), int(month), int(day))\n if comp<today:\n return False\n else:\n return True\n else:\n return False" }, { "identifier": "get_token", "path": "utils.py", "snippet": "async def get_token(bot, userid, link):\n user = await bot.get_users(userid)\n if not await db.is_user_exist(user.id):\n await db.add_user(user.id, user.first_name)\n await bot.send_message(LOG_CHANNEL, 
script.LOG_TEXT_P.format(user.id, user.mention))\n token = ''.join(random.choices(string.ascii_letters + string.digits, k=7))\n TOKENS[user.id] = {token: False}\n link = f\"{link}verify-{user.id}-{token}\"\n shortened_verify_url = await get_verify_shorted_link(link)\n return str(shortened_verify_url)" }, { "identifier": "get_shortlink", "path": "utils.py", "snippet": "async def get_shortlink(chat_id, link):\n settings = await get_settings(chat_id) #fetching settings for group\n if 'shortlink' in settings.keys():\n URL = settings['shortlink']\n else:\n URL = SHORTLINK_URL\n if 'shortlink_api' in settings.keys():\n API = settings['shortlink_api']\n else:\n API = SHORTLINK_API\n https = link.split(\":\")[0] \n if \"http\" == https: #if https == \"http\":\n https = \"https\"\n link = link.replace(\"http\", https)\n if URL == \"api.shareus.in\":\n url = f'https://{URL}/shortLink'\n params = {\n \"token\": API,\n \"format\": \"json\",\n \"link\": link,\n }\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(url, params=params, raise_for_status=True, ssl=False) as response:\n data = await response.json(content_type=\"text/html\")\n if data[\"status\"] == \"success\":\n return data[\"shortlink\"]\n else:\n logger.error(f\"Error: {data['message']}\")\n return f'https://{URL}/shortLink?token={API}&format=json&link={link}'\n except Exception as e:\n logger.error(e)\n return f'https://{URL}/shortLink?token={API}&format=json&link={link}'\n else:\n url = f'https://{URL}/api'\n params = {\n \"api\": API,\n \"url\": link,\n }\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(url, params=params, raise_for_status=True, ssl=False) as response:\n data = await response.json()\n if data[\"status\"] == \"success\":\n return data[\"shortenedUrl\"]\n else:\n logger.error(f\"Error: {data['message']}\")\n return f'https://{URL}/api?api={API}&link={link}'\n except Exception as e:\n logger.error(e)\n return f'https://{URL}/api?api={API}&link={link}'" }, { "identifier": "get_tutorial", "path": "utils.py", "snippet": "async def get_tutorial(chat_id):\n settings = await get_settings(chat_id) #fetching settings for group\n if 'tutorial' in settings.keys():\n if settings['is_tutorial']:\n TUTORIAL_URL = settings['tutorial']\n else:\n TUTORIAL_URL = TUTORIAL\n else:\n TUTORIAL_URL = TUTORIAL\n return TUTORIAL_URL" }, { "identifier": "active_connection", "path": "database/connections_mdb.py", "snippet": "async def active_connection(user_id):\n\n query = mycol.find_one(\n { \"_id\": user_id },\n { \"_id\": 0, \"group_details\": 0 }\n )\n if not query:\n return None\n\n group_id = query['active_group']\n return int(group_id) if group_id != None else None" }, { "identifier": "ENABLE_SHORTLINK", "path": "plugins/pm_filter.py", "snippet": "ENABLE_SHORTLINK = \"\"" } ]
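The verification helpers quoted above (``get_token`` / ``check_token``) boil down to a small token registry. The standalone sketch below mirrors that bookkeeping with a hypothetical bot deep link and makes no Telegram or database calls.

import random
import string

TOKENS = {}

def issue_token(user_id: int, base_link: str) -> str:
    # Same shape as get_token above: 7 random alphanumerics, stored as "unused".
    token = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
    TOKENS[user_id] = {token: False}
    return f"{base_link}verify-{user_id}-{token}"

def is_token_valid(user_id: int, token: str) -> bool:
    # check_token above treats a stored value of False as "issued but not yet used".
    return TOKENS.get(user_id, {}).get(token) is False

link = issue_token(12345, "https://t.me/ExampleBot?start=")   # ExampleBot is a placeholder
print(link)
print(is_token_valid(12345, link.rsplit("-", 1)[-1]))          # True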
import os import logging import random import asyncio import re import json import base64 from Script import script from pyrogram import Client, filters, enums from pyrogram.errors import ChatAdminRequired, FloodWait from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup from database.ia_filterdb import Media, get_file_details, unpack_new_file_id, get_bad_files from database.users_chats_db import db from info import CHANNELS, ADMINS, AUTH_CHANNEL, LOG_CHANNEL, PICS, BATCH_FILE_CAPTION, CUSTOM_FILE_CAPTION, PROTECT_CONTENT, CHNL_LNK, GRP_LNK, REQST_CHANNEL, SUPPORT_CHAT_ID, SUPPORT_CHAT, MAX_B_TN, VERIFY, SHORTLINK_API, SHORTLINK_URL, TUTORIAL, IS_TUTORIAL, STICKERS from utils import get_settings, get_size, is_subscribed, save_group_settings, temp, verify_user, check_token, check_verification, get_token, get_shortlink, get_tutorial from database.connections_mdb import active_connection from plugins.pm_filter import ENABLE_SHORTLINK
17,791
logger = logging.getLogger(__name__) BATCH_FILES = {} @Client.on_message(filters.command("start") & filters.incoming) async def start(client, message): if message.chat.type in [enums.ChatType.GROUP, enums.ChatType.SUPERGROUP]: buttons = [ [ InlineKeyboardButton('🤖 ᴜᴘᴅᴀᴛᴇꜱ 🤖', url="https://telegram.me/Nobideveloper") ], [ InlineKeyboardButton('♻️ ᴘʟᴇᴀꜱᴇ ꜱʜᴀʀᴇ ♻️', url=f"https://telegram.me/share/url?url=telegram.me/Nobideveloper"), ] ] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_sticker(sticker=random.choice(STICKERS), reply_markup=reply_markup) await asyncio.sleep(2) if not await db.get_chat(message.chat.id): total=await client.get_chat_members_count(message.chat.id) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, "Unknown")) await db.add_chat(message.chat.id, message.chat.title) return if not await db.is_user_exist(message.from_user.id): await db.add_user(message.from_user.id, message.from_user.first_name) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(message.from_user.id, message.from_user.mention)) if len(message.command) != 2: buttons = [[ InlineKeyboardButton('⇄ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ⇄', url=f'http://telegram.me/{temp.U_NAME}?startgroup=true') ],[ InlineKeyboardButton('👨‍💻 ᴏᴡɴᴇʀ​', callback_data='button'), InlineKeyboardButton('🌿 ꜱᴜᴘᴘᴏʀᴛ', callback_data='group_info') ],[ InlineKeyboardButton('💠 ʜᴇʟᴘ 💠', callback_data='help'), InlineKeyboardButton('♻️ ᴀʙᴏᴜᴛ ♻️', callback_data='about') ],[ InlineKeyboardButton('💰 ᴇᴀʀɴ ᴍᴏɴᴇʏ ᴡɪᴛʜ ʙᴏᴛ 💸', callback_data='shortlink_info') ]] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_photo( photo=random.choice(PICS), caption=script.START_TXT.format(message.from_user.mention, temp.U_NAME, temp.B_NAME), reply_markup=reply_markup, parse_mode=enums.ParseMode.HTML ) return
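The keyboard layout in the handler above is just nested lists of buttons; constructing the pyrogram objects needs no bot session, so the sketch below (with trimmed button labels) can be checked offline, assuming pyrogram is installed.

from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup

buttons = [
    [InlineKeyboardButton('ʜᴇʟᴘ', callback_data='help'),
     InlineKeyboardButton('ᴀʙᴏᴜᴛ', callback_data='about')],
    [InlineKeyboardButton('ᴇᴀʀɴ ᴍᴏɴᴇʏ ᴡɪᴛʜ ʙᴏᴛ', callback_data='shortlink_info')],
]
markup = InlineKeyboardMarkup(buttons)
print(markup.inline_keyboard[0][1].callback_data)   # about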
logger = logging.getLogger(__name__) BATCH_FILES = {} @Client.on_message(filters.command("start") & filters.incoming) async def start(client, message): if message.chat.type in [enums.ChatType.GROUP, enums.ChatType.SUPERGROUP]: buttons = [ [ InlineKeyboardButton('🤖 ᴜᴘᴅᴀᴛᴇꜱ 🤖', url="https://telegram.me/Nobideveloper") ], [ InlineKeyboardButton('♻️ ᴘʟᴇᴀꜱᴇ ꜱʜᴀʀᴇ ♻️', url=f"https://telegram.me/share/url?url=telegram.me/Nobideveloper"), ] ] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_sticker(sticker=random.choice(STICKERS), reply_markup=reply_markup) await asyncio.sleep(2) if not await db.get_chat(message.chat.id): total=await client.get_chat_members_count(message.chat.id) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, "Unknown")) await db.add_chat(message.chat.id, message.chat.title) return if not await db.is_user_exist(message.from_user.id): await db.add_user(message.from_user.id, message.from_user.first_name) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(message.from_user.id, message.from_user.mention)) if len(message.command) != 2: buttons = [[ InlineKeyboardButton('⇄ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ⇄', url=f'http://telegram.me/{temp.U_NAME}?startgroup=true') ],[ InlineKeyboardButton('👨‍💻 ᴏᴡɴᴇʀ​', callback_data='button'), InlineKeyboardButton('🌿 ꜱᴜᴘᴘᴏʀᴛ', callback_data='group_info') ],[ InlineKeyboardButton('💠 ʜᴇʟᴘ 💠', callback_data='help'), InlineKeyboardButton('♻️ ᴀʙᴏᴜᴛ ♻️', callback_data='about') ],[ InlineKeyboardButton('💰 ᴇᴀʀɴ ᴍᴏɴᴇʏ ᴡɪᴛʜ ʙᴏᴛ 💸', callback_data='shortlink_info') ]] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_photo( photo=random.choice(PICS), caption=script.START_TXT.format(message.from_user.mention, temp.U_NAME, temp.B_NAME), reply_markup=reply_markup, parse_mode=enums.ParseMode.HTML ) return
if AUTH_CHANNEL and not await is_subscribed(client, message):
8
2023-11-28 13:36:56+00:00
24k
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. 
Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
\"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n 
dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] 
Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = 
encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting 
= weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. 
/ self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n 
return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n 
total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = 
{\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "instantiate_from_config", "path": "data/dataset_instantiate.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(config.get(\"params\", dict()))" }, { "identifier": "calculate_psnr_ssim", "path": "metrics/metrics_all.py", "snippet": "def calculate_psnr_ssim(gt_path, restored_path, test_y_channel = False, crop_border = 0, suffix = '', correct_mean_var = False, show_details =False):\n \"\"\"\n Calculate PSNR and SSIM for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n test_y_channel: If True, test Y channel (In MatLab YCbCr format). If False, test RGB channels.\n crop_border: Crop border for each side\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate PSNR and SSIM for images\")\n psnr_all = []\n ssim_all = []\n img_list_gt = sorted(list(scandir(gt_path, recursive=True, full_path=True)))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n if test_y_channel:\n print('Testing Y channel.')\n else:\n print('Testing RGB channels.')\n\n for i, img_path in tqdm(enumerate(img_list_gt)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255.\n img_restored\n if correct_mean_var:\n mean_l = []\n std_l = []\n for j in range(3):\n mean_l.append(np.mean(img_gt[:, :, j]))\n std_l.append(np.std(img_gt[:, :, j]))\n for j in range(3):\n # correct twice\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n if test_y_channel and img_gt.ndim == 3 and img_gt.shape[2] == 3:\n img_gt = bgr2ycbcr(img_gt, y_only=True)\n img_restored = bgr2ycbcr(img_restored, y_only=True)\n\n # calculate PSNR and SSIM\n psnr = calculate_psnr(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n ssim = calculate_ssim(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n if show_details:\n print(f'{basename + suffix + ext:25}. 
\\tPSNR: {psnr:.6f} dB, \\tSSIM: {ssim:.6f}')\n psnr_all.append(psnr)\n ssim_all.append(ssim)\n Average_psnr = sum(psnr_all) / len(psnr_all)\n Average_ssim = sum(ssim_all) / len(ssim_all)\n print(f'PSNR: {Average_psnr:.6f} dB, SSIM: {Average_ssim:.6f}')\n return Average_psnr, Average_ssim" }, { "identifier": "calculate_lpips", "path": "metrics/metrics_all.py", "snippet": "def calculate_lpips(gt_path, restored_path, suffix = '', show_details =False):\n \"\"\"\n Calculate LPIPS for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate LPIPS for images\")\n loss_fn_vgg = lpips.LPIPS(net='vgg').cuda() # RGB, normalized to [-1,1]\n lpips_all = []\n img_list = sorted(glob.glob(osp.join(gt_path, '*')))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n for i, img_path in tqdm(enumerate(img_list)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255. \n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255. \n\n img_gt, img_restored = img2tensor([img_gt, img_restored], bgr2rgb=True, float32=True)\n # norm to [-1, 1]\n normalize(img_gt, mean, std, inplace=True)\n normalize(img_restored, mean, std, inplace=True)\n\n # calculate lpips\n lpips_val = loss_fn_vgg(img_restored.unsqueeze(0).cuda(), img_gt.unsqueeze(0).cuda())\n lpips_val = lpips_val.cpu().item()\n if show_details:\n print(f'{i+1:3d}: {basename:25}. \\tLPIPS: {lpips_val:.6f}.')\n lpips_all.append(lpips_val)\n Average_lpips = sum(lpips_all) / len(lpips_all)\n print(f'LPIPS: {Average_lpips:.6f}')\n return Average_lpips" }, { "identifier": "calculate_NIQE", "path": "metrics/metrics_all.py", "snippet": "def calculate_NIQE(restored_path, crop_border = 0, show_details =False):\n \"\"\"\n Calculate NIQE for images.\n restored_path: Path to restored images\n crop_border: Crop border for each side\n \"\"\"\n print(\"Calculate NIQE for images\")\n niqe_all = []\n img_list = sorted(scandir(restored_path, recursive=True, full_path=True))\n\n for i, img_path in tqdm(enumerate(img_list)):\n basename, _ = os.path.splitext(os.path.basename(img_path))\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=RuntimeWarning)\n niqe_score = calculate_niqe(img, crop_border, input_order='HWC', convert_to='y')\n if show_details:\n print(f'{i+1:3d}: {basename:25}. 
\\tNIQE: {niqe_score:.6f}')\n niqe_all.append(niqe_score)\n Average_niqe = sum(niqe_all) / len(niqe_all)\n print(f'NIQE: {Average_niqe:.6f}')\n return Average_niqe " }, { "identifier": "calculate_fid_folder", "path": "metrics/metrics_all.py", "snippet": "def calculate_fid_folder(restored_path):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n fid_stats = ''\n batch_size = 64\n num_sample = 50000\n num_workers = 4\n backend = 'disk'\n\n # inception model\n inception = load_patched_inception_v3(device)\n\n # create dataset\n opt = {}\n opt['name'] = 'SingleImageDataset'\n opt['type'] = 'SingleImageDataset'\n opt['dataroot_lq'] = restored_path\n opt['io_backend'] = dict(type=backend)\n opt['mean'] = [0.5, 0.5, 0.5]\n opt['std'] = [0.5, 0.5, 0.5]\n dataset = build_dataset(opt)\n\n # create dataloader\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n sampler=None,\n drop_last=False)\n num_sample = min(num_sample, len(dataset))\n total_batch = math.ceil(num_sample / batch_size)\n\n def data_generator(data_loader, total_batch):\n for idx, data in enumerate(data_loader):\n if idx >= total_batch:\n break\n else:\n yield data['lq']\n\n features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)\n features = features.numpy()\n total_len = features.shape[0]\n features = features[:num_sample]\n print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')\n\n sample_mean = np.mean(features, 0)\n sample_cov = np.cov(features, rowvar=False)\n\n # load the dataset stats\n stats = torch.load(fid_stats)\n real_mean = stats['mean']\n real_cov = stats['cov']\n\n # calculate FID metric\n fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)\n print('fid:', fid)\n return fid" } ]
import torch
import os
import numpy as np
import math
import shutil
import safetensors.torch
from ldm.modules.diffusionmodules.util import timestep_embedding
from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.diffusionmodules.openaimodel import UNetModel
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder
from torch.utils.data import DataLoader
from PIL import Image
from torch.optim.lr_scheduler import LambdaLR
from omegaconf import OmegaConf
20,632
def get_state_dict(d):
    return d.get('state_dict', d)


def load_state_dict(ckpt_path, location='cpu'):
    _, extension = os.path.splitext(ckpt_path)
    if extension.lower() == ".safetensors":
        state_dict = safetensors.torch.load_file(ckpt_path, device=location)
    else:
        state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
    state_dict = get_state_dict(state_dict)
    print(f'Loaded state_dict from [{ckpt_path}]')
    return state_dict


def create_model(config_path):
    config = OmegaConf.load(config_path)
    model = instantiate_from_config(config.model).cpu()
    print(f'Loaded model config from [{config_path}]')
    return model


class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, **kwargs):
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)
        h = x.type(self.dtype)
        for i, module in enumerate(self.input_blocks):
            h = module(h, emb, context)
            if ((i+1)%3 == 0) and control is not None:
                h = h + control.pop(0)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        if control is not None:
            h += control.pop(0)
        for i, module in enumerate(self.output_blocks):
            if control is None:
                h = torch.cat([h, hs.pop()], dim=1)
            else:
                h = torch.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
            if ((i+2)%3 == 0) and control is not None:
                h = h + control.pop(0)
        h = h.type(x.dtype)
        return self.out(h)
class BFRffusion(LatentDiffusion):
2
2023-11-30 13:50:58+00:00
24k
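For orientation, a minimal usage sketch of the loader helpers defined in the snippet above (create_model, load_state_dict). The config/checkpoint paths, the strict=False flag, and the final .cuda() call are illustrative assumptions, not values taken from this record:

# Sketch only: 'configs/model.yaml' and 'weights/model.ckpt' are hypothetical paths.
model = create_model('configs/model.yaml')                           # build the model from an OmegaConf config, on CPU
state_dict = load_state_dict('weights/model.ckpt', location='cpu')   # handles both .ckpt and .safetensors files
missing, unexpected = model.load_state_dict(state_dict, strict=False)  # strict=False is an assumption for partial checkpoints
print(f'missing keys: {len(missing)}, unexpected keys: {len(unexpected)}')
model = model.cuda().eval()                                          # assumes a GPU is available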
IanYeung/MGLD-VSR
ldm/models/diffusion/ddpm_inv.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n pass\n # assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n pass\n # assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):\n sd = torch.load(path, map_location=\"cpu\")\n if \"state_dict\" in list(sd.keys()):\n sd = sd[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n if 'first_stage_model' in k:\n sd[k[18:]] = sd[k]\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(\n sd, strict=False)\n print(f\"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys\")\n if len(missing) > 0:\n print(f\"Missing Keys: {missing}\")\n # if len(unexpected) > 0:\n # print(f\"Unexpected Keys: {unexpected}\")\n\n def encode(self, x, return_encfea=False):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n if return_encfea:\n return posterior, moments\n return posterior\n\n def encode_gt(self, x, new_encoder):\n h = new_encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior, moments\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n x = x.to(memory_format=torch.contiguous_format).float()\n # x = x*2.0-1.0\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n 
self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n # log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = 
make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def q_sample(self, x_start, t, noise=None, ddim_num_steps=200):\n self.make_schedule(ddim_num_steps=ddim_num_steps)\n noise = default(noise, lambda: torch.randn_like(x_start))\n return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current 
prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def decode_sr(self, x_latent, cond, struct_cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_sr(x_dec, cond, struct_cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n @torch.no_grad()\n def sample_sr(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n\n @torch.no_grad()\n def sample_sr_t(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr_t(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr_t(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n # timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else sorted(set(space_timesteps(1000, [self.ddim_timesteps.shape[0]])))\n timesteps = np.array(timesteps)\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr_t(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr_t(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n struct_c_t = self.model.structcond_stage_model(struct_c, t)\n e_t = self.model.apply_model(x, t, c, struct_c_t)\n else:\n assert NotImplementedError\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" } ]
import torch
import torch.nn as nn
import os
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
15,948
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond, shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)
        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)
        return samples, intermediates

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False,
                   plot_diffusion_rows=False, **kwargs):
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.embedding_reg_weight = embedding_reg_weight self.unfreeze_model = unfreeze_model self.model_lr = model_lr if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] 
tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) 
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: 
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, personalization_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if not self.unfreeze_model: self.cond_stage_model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False self.model.eval() self.model.train = disabled_train for param in self.model.parameters(): param.requires_grad = False self.embedding_manager = self.instantiate_embedding_manager(personalization_config, self.cond_stage_model) for param in self.embedding_manager.embedding_parameters(): param.requires_grad = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def instantiate_embedding_manager(self, config, embedder): model = instantiate_from_config(config, embedder=embedder) if config.params.get("embedding_manager_ckpt", None): # do not load if missing OR empty string model.load(config.params.embedding_manager_ckpt) return model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c, embedding_manager=self.embedding_manager) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 
1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: 
if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. 
(128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops 
together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) if self.embedding_reg_weight > 0: loss_embedding_reg = self.embedding_manager.embedding_to_coarse_loss().mean() loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg}) loss += (self.embedding_reg_weight * loss_embedding_reg) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, shape,cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False, plot_diffusion_rows=False, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = 
log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
elif isimage(xc):
4
2023-11-30 01:50:29+00:00
24k
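Aside, for readers skimming the latent-diffusion record that ends here: the core of its listing is DDPM.register_schedule() (which precomputes sqrt_alphas_cumprod and sqrt_one_minus_alphas_cumprod) and DDPM.q_sample(), which together implement the closed-form forward-diffusion step q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I). The sketch below is a minimal, self-contained illustration of that step written for this note; it is not taken from the record. The helper names, the toy tensor shapes, and the simplified beta ramp are all assumptions (the record's "linear" schedule is actually linear in sqrt(beta), not in beta).

import torch
from typing import Optional

def make_linear_betas(timesteps: int = 1000,
                      linear_start: float = 1e-4,
                      linear_end: float = 2e-2) -> torch.Tensor:
    # Plain linear ramp used purely for illustration; the schedule in the
    # record above ("linear") is linear in sqrt(beta) before squaring.
    return torch.linspace(linear_start, linear_end, timesteps)

def q_sample(x0: torch.Tensor,
             t: torch.Tensor,
             sqrt_ab: torch.Tensor,
             sqrt_1m_ab: torch.Tensor,
             noise: Optional[torch.Tensor] = None) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    noise = torch.randn_like(x0) if noise is None else noise
    view = (-1,) + (1,) * (x0.dim() - 1)   # broadcast (b,) -> (b, 1, 1, 1)
    return sqrt_ab[t].view(view) * x0 + sqrt_1m_ab[t].view(view) * noise

betas = make_linear_betas()
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sqrt_ab = alphas_cumprod.sqrt()
sqrt_1m_ab = (1.0 - alphas_cumprod).sqrt()

x0 = torch.randn(2, 3, 8, 8)        # toy stand-in for a batch of latents
t = torch.tensor([10, 900])         # one early and one late timestep
x_t = q_sample(x0, t, sqrt_ab, sqrt_1m_ab)
print(x_t.shape, sqrt_ab[t])        # at t=900, sqrt(alpha_bar) is near 0

Running this shows that for large t the scale sqrt(alpha_bar_t) is close to zero, so x_t is almost pure Gaussian noise, which is exactly the regime the record's p_sample_loop() starts from when it reverses the process.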
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "FilterAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class FilterAnnotations(BaseTransform):\n \"\"\"Filter invalid annotations.\n\n Required Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n\n Args:\n min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n boxes. Default: (1., 1.)\n min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n Default: 1\n by_box (bool): Filter instances with bounding boxes not meeting the\n min_gt_bbox_wh threshold. Default: True\n by_mask (bool): Filter instances with masks not meeting\n min_gt_mask_area threshold. Default: False\n keep_empty (bool): Whether to return None when it\n becomes an empty bbox after filtering. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n min_gt_mask_area: int = 1,\n by_box: bool = True,\n by_mask: bool = False,\n keep_empty: bool = True) -> None:\n # TODO: add more filter options\n assert by_box or by_mask\n self.min_gt_bbox_wh = min_gt_bbox_wh\n self.min_gt_mask_area = min_gt_mask_area\n self.by_box = by_box\n self.by_mask = by_mask\n self.keep_empty = keep_empty\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to filter annotations.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n assert 'gt_bboxes' in results\n gt_bboxes = results['gt_bboxes']\n if gt_bboxes.shape[0] == 0:\n return results\n\n tests = []\n if self.by_box:\n tests.append(\n ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n if self.by_mask:\n assert 'gt_masks' in results\n gt_masks = results['gt_masks']\n tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n keep = tests[0]\n for t in tests[1:]:\n keep = keep & t\n\n if not keep.any():\n if self.keep_empty:\n return None\n\n keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n for key in keys:\n if key in results:\n results[key] = results[key][keep]\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n f'keep_empty={self.keep_empty})'" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. 
N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> 
np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is 
not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. 
Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. 
random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class 
CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. 
If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = 
gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. 
For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. 
That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n 
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. 
Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += 
f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" }, { "identifier": "PipelineSwitchHook", "path": "mmdet/engine/hooks/pipeline_switch_hook.py", "snippet": "class PipelineSwitchHook(Hook):\n \"\"\"Switch data pipeline at switch_epoch.\n\n Args:\n switch_epoch (int): switch pipeline at this epoch.\n switch_pipeline (list[dict]): the pipeline to switch to.\n \"\"\"\n\n def __init__(self, switch_epoch, switch_pipeline):\n self.switch_epoch = switch_epoch\n self.switch_pipeline = switch_pipeline\n self._restart_dataloader = False\n self._has_switched = False\n\n def before_train_epoch(self, runner):\n \"\"\"switch pipeline.\"\"\"\n epoch = runner.epoch\n train_loader = runner.train_dataloader\n if epoch >= self.switch_epoch and not self._has_switched:\n runner.logger.info('Switch pipeline now!')\n # The dataset pipeline cannot be updated when persistent_workers\n # is True, so we need to force the dataloader's multi-process\n # restart. 
This is a very hacky approach.\n train_loader.dataset.pipeline = Compose(self.switch_pipeline)\n if hasattr(train_loader, 'persistent_workers'\n ) and train_loader.persistent_workers is True:\n train_loader._DataLoader__initialized = False\n train_loader._iterator = None\n self._restart_dataloader = True\n self._has_switched = True\n else:\n # Once the restart is complete, we need to restore\n # the initialization flag.\n if self._restart_dataloader:\n train_loader._DataLoader__initialized = True" }, { "identifier": "ExpMomentumEMA", "path": "mmdet/models/layers/ema.py", "snippet": "class ExpMomentumEMA(ExponentialMovingAverage):\n \"\"\"Exponential moving average (EMA) with exponential momentum strategy,\n which is used in YOLOX.\n\n Args:\n model (nn.Module): The model to be averaged.\n momentum (float): The momentum used for updating ema parameter.\n Ema's parameter are updated with the formula:\n `averaged_param = (1-momentum) * averaged_param + momentum *\n source_param`. Defaults to 0.0002.\n gamma (int): Use a larger momentum early in training and gradually\n annealing to a smaller value to update the ema model smoothly. The\n momentum is calculated as\n `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.\n Defaults to 2000.\n interval (int): Interval between two updates. Defaults to 1.\n device (torch.device, optional): If provided, the averaged model will\n be stored on the :attr:`device`. Defaults to None.\n update_buffers (bool): if True, it will compute running averages for\n both the parameters and the buffers of the model. Defaults to\n False.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n momentum: float = 0.0002,\n gamma: int = 2000,\n interval=1,\n device: Optional[torch.device] = None,\n update_buffers: bool = False) -> None:\n super().__init__(\n model=model,\n momentum=momentum,\n interval=interval,\n device=device,\n update_buffers=update_buffers)\n assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'\n self.gamma = gamma\n\n def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n \"\"\"Compute the moving average of the parameters using the exponential\n momentum strategy.\n\n Args:\n averaged_param (Tensor): The averaged parameters.\n source_param (Tensor): The source parameters.\n steps (int): The number of times the parameters have been\n updated.\n \"\"\"\n momentum = (1 - self.momentum) * math.exp(\n -float(1 + steps) / self.gamma) + self.momentum\n averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)" } ]
from mmengine.config import read_base
from .rtmdet_ins_l_8xb32_300e_coco import *
from mmcv.transforms.loading import LoadImageFromFile
from mmcv.transforms.processing import RandomResize
from mmengine.hooks.ema_hook import EMAHook
from mmdet.datasets.transforms.formatting import PackDetInputs
from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations)
from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug)
from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook
from mmdet.models.layers.ema import ExpMomentumEMA
18,108
# Copyright (c) OpenMMLab. All rights reserved.
# Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa
# mmcv >= 2.0.1
# mmengine >= 0.8.0
with read_base():

checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth'  # noqa

model.update(
    dict(
        backbone=dict(
            deepen_factor=0.33,
            widen_factor=0.5,
            init_cfg=dict(
                type='Pretrained',
                prefix='backbone.',
                checkpoint=checkpoint)),
        neck=dict(
            in_channels=[128, 256, 512],
            out_channels=128,
            num_csp_blocks=1),
        bbox_head=dict(in_channels=128, feat_channels=128)))

train_pipeline = [
    dict(type=LoadImageFromFile, backend_args=backend_args),
    dict(
        type=LoadAnnotations,
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
# Copyright (c) OpenMMLab. All rights reserved.
# Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa
# mmcv >= 2.0.1
# mmengine >= 0.8.0
with read_base():

checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth'  # noqa

model.update(
    dict(
        backbone=dict(
            deepen_factor=0.33,
            widen_factor=0.5,
            init_cfg=dict(
                type='Pretrained',
                prefix='backbone.',
                checkpoint=checkpoint)),
        neck=dict(
            in_channels=[128, 256, 512],
            out_channels=128,
            num_csp_blocks=1),
        bbox_head=dict(in_channels=128, feat_channels=128)))

train_pipeline = [
    dict(type=LoadImageFromFile, backend_args=backend_args),
    dict(
        type=LoadAnnotations,
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0),
4
2023-11-30 08:58:00+00:00
24k
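Taken together, the row above pairs retrieved context snippets with a cropped file prefix, a gold next_line, and a gold_snippet_index into the context list. The sketch below shows one way such a record could be assembled into a prompt and scored; it is an illustrative assumption only — the toy record literal and the build_prompt / exact_match helpers are hypothetical and not part of the dataset's own tooling.

def build_prompt(record: dict) -> str:
    """Prepend the retrieved context snippets to the cropped file prefix."""
    context_block = "\n\n".join(
        f"# {snip['path']} :: {snip['identifier']}\n{snip['snippet']}"
        for snip in record["context"]
    )
    return f"{context_block}\n\n{record['import_statement']}\n{record['cropped_code']}"


def exact_match(prediction: str, record: dict) -> bool:
    """Score a predicted completion against the gold next_line by exact match."""
    return prediction.strip() == record["next_line"].strip()


# Toy record mirroring the fields of the row above (content abbreviated).
record = {
    "context": [{
        "identifier": "CachedMosaic",
        "path": "mmdet/datasets/transforms/transforms.py",
        "snippet": "class CachedMosaic(Mosaic): ...",
    }],
    "import_statement": "from mmdet.datasets.transforms.transforms import CachedMosaic",
    "cropped_code": "train_pipeline = [",
    "next_line": "    dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0),",
    "gold_snippet_index": 0,
}

prompt = build_prompt(record)
print(len(prompt.splitlines()), "prompt lines")
print(exact_match("dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0),", record))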
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/amd_dnn_plus.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e-20,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n \"\"\"\n 构造函数\n\n 参数:\n - attack_list: 已实例化的攻击对象的列表,至少应该有一个攻击方法。\n - varepsilon: 用于判断收敛性的标量,默认值为1e-20。\n - is_attacker: Bool, 表示是否为攻击者,默认为True。\n - oblivion: Bool, 一个布尔标志(其功能在这里并未详细说明),默认为False。\n - kappa: Float, 一个浮点数参数,默认为1。\n - manipulation_x: 可能与数据的处理或操纵有关,具体用途未详细说明。\n - omega: 参数omega的具体用途未详细说明。\n - device: 设备,例如'cuda'或'cpu',用于执行计算。\n\n 注意:\n - 在初始化过程中,会首先检查`attack_list`是否包含至少一个攻击对象。\n \"\"\"\n super(Max, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device) # 调用父类的构造函数\n assert len(attack_list) > 0, '至少需要一个攻击方法。' # 确保提供了至少一个攻击对象\n self.attack_list = attack_list # 设置攻击列表\n self.varepsilon = varepsilon # 设置varepsilon值\n self.device = device # 设置计算设备\n\n def perturb(self, model, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n model.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n loss, done = self.get_scores(model, x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label))\n else:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n ))\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n loss, done = self.get_scores(model, pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + 
torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n _, done = self.get_scores(model, adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n def perturb_dae(self, predict_model, purifier, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False, oblivion=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n predict_model.eval()\n purifier.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n if not oblivion:\n purified_x = purifier(x.detach().clone().float()).to(torch.double)\n else:\n purified_x = x.detach().clone()\n loss, done = self.get_scores(predict_model, purified_x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb_dae(predict_model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label, oblivion=oblivion))\n else:\n pertbx.append(attack.perturb_dae(model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n oblivion=oblivion\n ))\n\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n\n loss, done = self.get_scores(predict_model, purified_pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = 
done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n purified_adv_x = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_scores(predict_model, purified_adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n # 这个get_scores函数的主要目的是计算扰动数据在给定模型上的损失值,并判断模型对这些扰动数据的预测是否成功完成。\n # 对于具有检测器功能的模型,还会考虑模型的额外输出来决定预测的完成状态。\n def get_scores(self, model, pertb_x, label):\n \"\"\"\n 获取扰动数据在模型上的损失值和预测标签的完成状态。\n\n 参数:\n @param model: 模型对象,即受攻击的目标模型。\n @param pertb_x: torch.Tensor,扰动后的数据。\n @param label: torch.Tensor,扰动数据的真实标签。\n\n 返回:\n - loss_no_reduction: 每个样本的损失值(无降维处理)。\n - done: Boolean Tensor,表示模型对每个样本的预测是否成功完成。\n \"\"\"\n # 判断模型是否具有检测器功能,如果有,则获取模型的两个输出:logits_f 和 prob_g。\n if hasattr(model, 'is_detector_enabled'):\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有检测器功能,只获取一个输出logits_f。\n logits_f = model.forward(pertb_x)\n\n # 使用交叉熵计算每个样本的损失值\n ce = F.cross_entropy(logits_f, label, reduction='none')\n\n # 获取模型的预测标签\n y_pred = logits_f.argmax(1)\n\n # 如果模型具有检测器功能且不处于\"oblivion\"模式,则进行特殊处理。\n # 使用模型的输出prob_g来判断是否成功完成了预测。\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n tau = model.get_tau_sample_wise(y_pred)\n loss_no_reduction = -prob_g\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果模型没有检测器功能或处于\"oblivion\"模式,则使用交叉熵损失来判断是否成功完成了预测。\n loss_no_reduction = ce\n done = y_pred != label\n\n return loss_no_reduction, done" }, { "identifier": "StepwiseMax", "path": "core/attack/stepwise_max.py", "snippet": "class StepwiseMax(BaseAttack):\n \"\"\"\n Stepwise max攻击方法,这是一个结合了pgd l1, pgd l2, 和 pgd linf三种攻击方式的方法。\n\n 参数\n ----------\n @param use_random: bool类型,是否使用随机的起始点。\n @param rounding_threshold: float类型,用于四舍五入实数的阈值。\n @param is_attacker: bool类型,是否扮演攻击者角色(注意:防御者执行对抗性训练)。\n @param oblivion: bool类型,是否知道敌手指示器。\n @param kappa: 攻击信心度。\n @param manipulation_x: 可操作性。\n @param omega: 与每个api相对应的互依赖api的索引。\n @param device: 设备,'cpu'或'cuda'。\n\n \"\"\"\n\n def __init__(self, use_random=False, rounding_threshold=0.5,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n super(StepwiseMax, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device)\n \n # 是否使用随机起点\n self.use_random = use_random\n \n # 断言确保四舍五入阈值在(0, 1)之间\n assert 0 < rounding_threshold < 1\n \n # 设置四舍五入的阈值\n self.round_threshold = rounding_threshold\n \n # lambda_用于正则化,通常与优化的损失一起使用\n self.lambda_ = 1.\n\n def perturb_dae(self, model, purifier, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n 
min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False,\n oblivion=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n purifier.eval()\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n # 计算损失和完成标志\n if not oblivion:\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_adv = adv_x.detach().clone()\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb_dae(model, purifier, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_,\n oblivion=False\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n if (not self.is_attacker) and (not is_score_round): \n scores, _done = self.get_scores(model, purified_pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, 
round_x(purified_pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n\n def perturb(self, model, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = 
updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb(model, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if (not self.is_attacker) and (not is_score_round):\n scores, _done = self.get_scores(model, pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, round_x(pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n def _perturb(self, model, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n ):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n if \"rnn\" in model.model_save_path:\n model.train()\n if \"lstm\" in model.model_save_path:\n model.train() \n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n loss, done = self.get_loss(model, var_adv_x, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) 
# 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. / step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n\n def _perturb_dae(self, model, purifier, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n oblivion=False):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n\n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n if not oblivion:\n purified_var = 
purifier(var_adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_var = var_adv_x.detach().clone()\n loss, done = self.get_loss(model, purified_var, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) # 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. 
/ step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n def get_scores(self, model, pertb_x, label):\n # 如果模型有 'is_detector_enabled' 这个属性\n if hasattr(model, 'is_detector_enabled'):\n # 获取模型的输出,logits_f 是模型的原始输出,prob_g 是一个概率值\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有 'is_detector_enabled' 这个属性,只获取模型的原始输出\n logits_f = model.forward(pertb_x)\n\n # 获取预测的类别\n y_pred = logits_f.argmax(1)\n \n # 计算交叉熵损失\n ce = F.cross_entropy(logits_f, label, reduction='none')\n \n # 如果模型有 'is_detector_enabled' 这个属性,并且 self.oblivion 为 False\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n # 获取样本的阈值\n tau = model.get_tau_sample_wise(y_pred)\n # 计算损失,加入了 prob_g 这个概率值的惩罚项\n loss_no_reduction = ce - self.lambda_ * prob_g\n # 判断预测是否错误,并且 prob_g 是否小于等于阈值 tau\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果没有 'is_detector_enabled' 这个属性或 self.oblivion 为 True,损失仍然是交叉熵损失\n loss_no_reduction = ce\n # 判断预测是否错误\n done = y_pred != label\n\n # 返回损失值和判断结果c\n return loss_no_reduction, done" }, { "identifier": "MalwareDetectionDNN", "path": "core/defense/md_dnn.py", "snippet": "class MalwareDetectionDNN(nn.Module):\n def __init__(self, input_size, n_classes, device='cpu', name='DNN', **kwargs):\n \"\"\"\n 初始化恶意软件检测器\n\n 参数:\n ----------\n @param input_size: 整数,输入向量的维度数量。\n @param n_classes: 整数,表示分类的数量,例如二分类问题中n=2。\n @param device: 字符串,可以是'cpu'或'cuda',表示模型应该在CPU还是GPU上运行。\n @param name: 字符串,用于命名模型。\n \"\"\"\n super(MalwareDetectionDNN, self).__init__() # 调用父类初始化\n self.input_size = input_size # 定义输入尺寸\n self.n_classes = n_classes # 定义分类数量\n self.device = device # 定义运行设备\n self.name = name # 定义模型名称\n\n self.parse_args(**kwargs) # 解析额外参数\n\n self.dense_layers = [] # 初始化一个空的密集层列表\n \n # 检查是否至少有一个隐藏层\n if len(self.dense_hidden_units) >= 1:\n # 添加第一个密集层\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n else:\n # 如果没有隐藏层,抛出异常\n raise ValueError(\"Expect at least one hidden layer.\")\n\n # 为每一对连续的隐藏单元添加一个密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[i], \n self.dense_hidden_units[i + 1]))\n \n # 添加最后一个连接到输出层的密集层\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[-1], self.n_classes))\n \n # 将密集层添加到模型中以进行跟踪\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('nn_model_layer_{}'.format(idx_i), dense_layer)\n\n # 根据参数选择使用SELU或ReLU激活函数\n if self.smooth:\n self.activation_func = F.selu # 使用SELU激活函数\n else:\n self.activation_func = F.relu # 使用ReLU激活函数\n\n # 定义模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'md_dnn') + '_' + self.name,\n 'model.pth')\n \n # 日志中打印模型的结构信息\n 
logger.info('========================================dnn model architecture===============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None,\n dropout=0.6,\n alpha_=0.2,\n smooth=False,\n **kwargs\n ):\n \"\"\"\n 解析并设置网络的超参数。\n\n 参数:\n ----------\n dense_hidden_units : list, 可选\n 网络中每个隐藏层的单元数。如果没有指定,则默认为两个隐藏层,每层200个单元。\n dropout : float, 可选\n dropout正则化的比率,默认为0.6。\n alpha_ : float, 可选\n 某些激活函数的参数,默认为0.2。\n smooth : bool, 可选\n 是否使用平滑的激活函数,默认为False。\n **kwargs : dict\n 其他超参数。\n \"\"\"\n\n # 如果用户没有指定隐藏层,使用默认的配置\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n # 如果用户指定了一个列表,使用它\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n # 否则抛出一个异常\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout, alpha和smooth参数\n self.dropout = dropout\n self.alpha_ = alpha_\n self.smooth = smooth\n\n # 从kwargs中获取并设置proc_number\n self.proc_number = kwargs.get('proc_number', None) # 如果不存在,则返回None\n\n # 如果还有其他参数,记录警告,因为这些参数可能是未知的\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n\n def forward(self, x):\n \"\"\"\n 使输入数据 x 通过神经网络\n \n 参数\n ----------\n @param x: 2D张量,特征表示\n \"\"\"\n # 遍历神经网络的每一层,除了最后一层\n for dense_layer in self.dense_layers[:-1]:\n x = self.activation_func(dense_layer(x)) # 使用激活函数处理每一层的输出\n\n # 对处理过的数据进行 dropout 操作,用于防止过拟合\n latent_representation = F.dropout(x, self.dropout, training=self.training)\n \n # 用最后一层进行处理,得到logits(未归一化的预测或分类得分)\n logits = self.dense_layers[-1](latent_representation)\n return logits\n\n def inference(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n\n # 将所有批次的置信度垂直堆叠成一个张量\n confidences = torch.vstack(confidences)\n # 将所有批次的真实标签连接成一个张量\n gt_labels = torch.cat(gt_labels, dim=0)\n \n return confidences, gt_labels\n\n def inference_dae(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n return confidences, gt_labels\n\n\n def get_important_attributes(self, test_data_producer, target_label=1):\n \"\"\"\n 使用集成梯度(Integrated Gradients)方法获取重要的属性/特征\n\n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n @param target_label: 目标标签,默认为1\n \n 返回值\n ----------\n 
返回重要的属性/特征\n \"\"\"\n attributions = [] # 存储属性或特征的重要性得分\n gt_labels = [] # 存储真实标签\n\n # 定义一个使用集成梯度方法的包装器\n def _ig_wrapper(_x):\n logits = self.forward(_x)\n return F.softmax(logits, dim=-1)\n\n # 初始化集成梯度对象\n ig = IntegratedGradients(_ig_wrapper)\n\n # 遍历测试数据集\n for i, (x, y) in enumerate(test_data_producer):\n # 将数据和标签转移到指定的设备上\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 使x能够计算梯度\n x.requires_grad = True\n # 定义基线,用于集成梯度的计算\n baseline = torch.zeros_like(x, dtype=torch.double, device=self.device)\n # 计算属性的重要性\n attribution_bs = ig.attribute(x,\n baselines=baseline,\n target=target_label)\n # 将所有批次的属性垂直堆叠\n attribution = torch.hstack(attribution_bs)\n # 保存得到的属性重要性得分和真实标签\n attributions.append(attribution.clone().detach().cpu().numpy())\n gt_labels.append(y.clone().detach().cpu().numpy())\n # 将真实标签保存为.npy文件\n np.save('./labels', np.concatenate(gt_labels))\n \n return np.vstack(attributions)\n\n\n def inference_batch_wise(self, x):\n \"\"\"\n 仅支持恶意软件样本的批量推理\n \n 参数\n ----------\n @param x: 输入数据的张量\n \n 返回值\n ----------\n 返回推理的置信度和标签\n \"\"\"\n # 确保输入是一个张量\n assert isinstance(x, torch.Tensor)\n \n # 获得模型的输出\n logit = self.forward(x)\n \n # 返回每个样本的置信度和一个与logit形状相同的全1数组(表示恶意软件样本)\n return torch.softmax(logit, dim=-1).detach().cpu().numpy(), np.ones((logit.size()[0],))\n\n\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并进行评估\n\n 参数\n --------\n @param test_data_producer: torch.DataLoader, 用于生成测试数据的数据加载器\n \"\"\"\n # 进行评估\n confidence, y_true = self.inference(test_data_producer)\n y_pred = confidence.argmax(1).cpu().numpy() # 预测标签\n y_true = y_true.cpu().numpy() # 真实标签\n \n # print(\"y_true.shape:\", y_true.shape)\n # print(\"y_pred.shape:\", y_pred.shape)\n \n # 使用sklearn的评估指标进行评估\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n accuracy = accuracy_score(y_true, y_pred)\n b_accuracy = balanced_accuracy_score(y_true, y_pred)\n \n MSG = \"The accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(accuracy * 100))\n \n MSG = \"The balanced accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(b_accuracy * 100))\n\n # 检查数据中是否存在缺失的类别\n if np.any([np.all(y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"class absent.\")\n return\n\n # 计算混淆矩阵\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n fpr = fp / float(tn + fp) # 计算假阳性率\n fnr = fn / float(tp + fn) # 计算假阴性率\n f1 = f1_score(y_true, y_pred, average='binary') # 计算F1分数\n\n print(\"Other evaluation metrics we may need:\")\n MSG = \"False Negative Rate (FNR) is {:.5f}%、False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%\"\n logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))\n\n\n def customize_loss(self, logits, gt_labels, representation=None, mini_batch_idx=None):\n \"\"\"\n 自定义损失函数\n\n 参数\n --------\n @param logits: Tensor, 模型的输出\n @param gt_labels: Tensor, 真实的标签\n @param representation: Tensor, 可选参数,表示特征表示\n @param mini_batch_idx: Int, 可选参数,表示小批次的索引\n \n 返回值\n --------\n 返回交叉熵损失\n \"\"\"\n return F.cross_entropy(logits, gt_labels)\n\n\n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., weight_sampling=0.5, verbose=True):\n \"\"\"\n 训练恶意软件检测器,根据验证集上的交叉熵损失选择最佳模型。\n\n 参数\n ----------\n @param train_data_producer: 对象, 用于生成一批训练数据的迭代器\n @param validation_data_producer: 对象, 用于生成验证数据的迭代器\n @param epochs: 整数, 训练的周期数\n @param lr: 浮点数, Adam优化器的学习率\n @param weight_decay: 浮点数, 惩罚因子\n @param verbose: 布尔值, 是否显示详细的日志\n \"\"\"\n # 初始化优化器\n optimizer = 
optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 记录验证集上的最佳准确率\n best_epoch = 0 # 记录最佳准确率对应的周期\n total_time = 0. # 总的训练时间\n\n # 获取训练数据批次的数量\n nbatches = len(train_data_producer)\n \n # 进行指定次数的训练周期\n for i in range(epochs):\n # 设置模型为训练模式\n self.train()\n # 初始化列表用于保存每批数据的损失值和准确率\n losses, accuracies = [], []\n\n # 对每个训练数据批次进行遍历\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 将数据转移到指定的计算设备(例如GPU或CPU)\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n\n # 记录开始训练的时间\n start_time = time.time()\n\n # 清空之前累积的梯度\n optimizer.zero_grad() \n \n # 对输入数据进行前向传播\n logits = self.forward(x_train) \n \n # 根据模型的输出和真实标签计算损失\n loss_train = self.customize_loss(logits, y_train) \n\n # 对损失进行反向传播\n loss_train.backward()\n \n # 使用优化器更新模型参数\n optimizer.step()\n\n # 计算训练这批数据所花费的总时间\n total_time += time.time() - start_time\n \n # 计算这批数据上的准确率\n acc_train = (logits.argmax(1) == y_train).sum().item() / x_train.size()[0]\n \n # 将时间转换为分钟和秒\n mins, secs = int(total_time / 60), int(total_time % 60)\n \n # 将这批数据的损失和准确率加入到列表中\n losses.append(loss_train.item())\n accuracies.append(acc_train)\n\n # 如果开启了详细输出模式,显示当前训练进度和这批数据上的损失和准确率\n if verbose:\n logger.info(f'小批次: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | 训练时间为 {mins:.0f} 分钟, {secs} 秒。')\n logger.info(f'训练损失(小批次级别): {losses[-1]:.4f} | 训练精度: {acc_train * 100:.2f}')\n\n\n self.eval() # 将模型设置为评估模式\n avg_acc_val = []\n\n with torch.no_grad(): # 确保在评估模式下不进行梯度的计算\n for x_val, y_val in validation_data_producer:\n # 将数据移动到指定设备(例如GPU或CPU)上,并确保数据的类型为双精度浮点数和长整型\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 使用模型进行前向传播,得到输出结果\n logits = self.forward(x_val)\n \n # 计算验证数据上的准确率\n acc_val = (logits.argmax(1) == y_val).sum().item() / x_val.size()[0]\n \n # 保存每一批验证数据的准确率\n avg_acc_val.append(acc_val)\n \n # 计算所有验证数据的平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前周期的验证精度超过之前的最佳验证精度\n if avg_acc_val >= best_avg_acc:\n # 更新最佳验证精度\n best_avg_acc = avg_acc_val\n best_epoch = i\n \n # 检查模型保存路径是否存在,如果不存在,则创建\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n \n # 保存当前的模型参数\n torch.save(self.state_dict(), self.model_save_path)\n \n # 如果开启了详细输出模式,显示模型保存路径\n if verbose:\n print(f'模型保存在路径: {self.model_save_path}')\n\n # 如果开启了详细输出模式,显示训练损失、训练精度、验证精度和最佳验证精度\n if verbose:\n logger.info(f'训练损失(周期级别): {np.mean(losses):.4f} | 训练精度: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'验证精度: {avg_acc_val * 100:.2f} | 最佳验证精度: {best_avg_acc * 100:.2f} 在第 {best_epoch} 个周期')\n\n def load(self):\n \"\"\"\n 从磁盘加载模型参数\n \"\"\"\n self.load_state_dict(torch.load(self.model_save_path))" }, { "identifier": "DetectorTemplate", "path": "core/defense/amd_template.py", "snippet": "class DetectorTemplate(object):\n def __init__(self):\n self.tau = None # 阈值变量\n self.is_detector_enabled = True # 表示检测器是否启用的标志\n\n def forward(self, x):\n \"\"\"\n 类预测与密度估计\n \"\"\"\n raise NotImplementedError\n\n def get_threshold(self):\n \"\"\"\n 计算拒绝异常值的阈值\n \"\"\"\n raise NotImplementedError\n\n def get_tau_sample_wise(self):\n \"\"\"\n 获取每个样本的tau值\n \"\"\"\n raise NotImplementedError\n\n def indicator(self):\n \"\"\"\n 返回一个布尔标志向量,指示是否拒绝一个样本\n \"\"\"\n raise NotImplementedError" }, { "identifier": "config", "path": "config.py", "snippet": "def parser_config():" }, { "identifier": "utils", "path": "tools/utils.py", "snippet": "ENC_KEY = 'cab228a122d3486bac7fab148e8b5aba'\n MSG = \"No such directory or file {} exists!\".format(sample_dir)\n MSG = \"A 
directory or a list of paths are allowed!\"\ndef pool_initializer():\ndef retrive_files_set(base_dir, dir_ext, file_ext):\n def get_file_name(root_dir, file_ext):\ndef check_dir(sample_dir):\ndef dump_joblib(data, path):\ndef read_joblib(path):\ndef load_json(json_path):\ndef dump_json(obj_dict, file_path):\ndef dump_pickle(data, path, use_gzip=False):\ndef read_pickle(path, use_gzip=False):\ndef dump_pickle_frd_space(data, path):\ndef read_pickle_frd_space(path):\ndef dump_list_of_lists(data, path):\ndef read_list_of_lists(path):\ndef mkdir(target):\ndef read_txt(path, mode='r'):\ndef dump_txt(data_str, path, mode='w'):\ndef read_file_by_fileinput(file_path, inplace=True):\n def __init__(self, manager, use_cache=True):\n def is_cached(self, key):\n def reset(self):\n def get(self, key):\n def cache(self, key, img, lbl):\ndef build_kwargs(keys, arg_dict):\ndef inverse_kwargs(vars):\ndef save_args(fout, args):\ndef load_args(fout):\ndef get_group_args(args, args_parser, title):\ndef tensor_coo_sp_to_ivs(sparse_tensor):\ndef ivs_to_tensor_coo_sp(ivs, device='cpu'):\ndef sp_to_symmetric_sp(sparse_mx):\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\ndef to_tensor(feature_x=None, labels=None, device='cpu'):\n def _to_torch_tensor(mat):\ndef to_device(feature_x=None, labels=None, device='cpu'):\ndef psn(x_tensor, prob, lower_value=0., upper_value=1.):\n def __init__(self):\n def __call__(self, module):\ndef round_x(x, alpha=0.5):\ndef get_x0(x, rounding_threshold=0.5, is_sample=False):\ndef or_tensors(x_1, x_2):\ndef xor_tensors(x_1, x_2):\ndef get_mal_data(x_batch, y_batch):\ndef get_mal_ben_data(x_batch, y_batch):\ndef java_class_name2smali_name(cls):\ndef remove_duplicate(components):\ndef crypt_identifier(idf, seed=2345):\n def md5_transform():\ndef random_string(code):\n def sha1_transform():\ndef string_on_code(code):\n def md5_transform():\ndef random_name(seed=2345, code='abc'):\ndef apply_encryption(base_string):\ndef get_sha256(file_path):\nclass SimplifyClass:\nclass NonnegWeightConstraint(object):" } ]
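The context snippets quoted above repeatedly walk through the "max" ensemble step: run several PGD-style perturbations, score each candidate on the victim model, and keep, per sample, the one with the largest loss. A minimal sketch of that selection step (not the dataset's code; shapes and names are illustrative assumptions):

```python
# Illustrative only: per-sample selection of the most damaging perturbation
# across several candidate attacks, mirroring the loss.max(...) / gather
# pattern in the quoted Max / StepwiseMax snippets.
import torch

n_samples, n_attacks, dim = 4, 3, 8
# stacked candidate perturbations: one row of attack outputs per sample
pertbx = torch.rand(n_samples, n_attacks, dim)
# loss of each candidate on the victim model (higher = more adversarial)
loss = torch.rand(n_samples, n_attacks)

best = loss.argmax(dim=-1)                      # index of the strongest attack, shape [n_samples]
adv_x = pertbx[torch.arange(n_samples), best]   # chosen perturbation, shape [n_samples, dim]
print(adv_x.shape)
```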
import time import os.path as path import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from core.attack.max import Max from core.attack.stepwise_max import StepwiseMax from core.defense.md_dnn import MalwareDetectionDNN from core.defense.amd_template import DetectorTemplate from config import config, logging, ErrorHandler from tools import utils from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
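The import statement above pulls in the sklearn metrics that the quoted `MalwareDetectionDNN.predict` uses to report FPR, FNR and F1 from a binary confusion matrix. A small self-contained sketch of that computation, with assumed example labels (not taken from the dataset):

```python
# Illustrative only: deriving FNR / FPR / F1 the same way the quoted
# predict() method does, using the sklearn helpers named in the imports.
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix, balanced_accuracy_score

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])   # assumed ground-truth labels
y_pred = np.array([0, 1, 1, 1, 0, 0, 1, 0])   # assumed model predictions

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
fpr = fp / float(tn + fp)   # false positive rate
fnr = fn / float(tp + fn)   # false negative rate
f1 = f1_score(y_true, y_pred, average='binary')
print(f"FNR={fnr:.3f} FPR={fpr:.3f} F1={f1:.3f} "
      f"bAcc={balanced_accuracy_score(y_true, y_pred):.3f}")
```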
20001
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dnn_plus') logger.addHandler(ErrorHandler) class AMalwareDetectionDNNPlus(nn.Module, DetectorTemplate): def __init__(self, md_nn_model, input_size, n_classes, ratio=0.95, device='cpu', name='', **kwargs): # 调用父类构造函数 nn.Module.__init__(self) DetectorTemplate.__init__(self) # 初始化输入参数 self.input_size = input_size self.n_classes = n_classes self.ratio = ratio self.device = device self.name = name self.parse_args(**kwargs) # 恶意软件检测模型 if md_nn_model is not None and isinstance(md_nn_model, nn.Module): # 如果提供了已经训练好的模型,就使用这个模型 self.md_nn_model = md_nn_model self.is_fitting_md_model = False # 默认情况下,模型已经被训练 else: # 否则,创建一个新的恶意软件检测模型
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dnn_plus') logger.addHandler(ErrorHandler) class AMalwareDetectionDNNPlus(nn.Module, DetectorTemplate): def __init__(self, md_nn_model, input_size, n_classes, ratio=0.95, device='cpu', name='', **kwargs): # 调用父类构造函数 nn.Module.__init__(self) DetectorTemplate.__init__(self) # 初始化输入参数 self.input_size = input_size self.n_classes = n_classes self.ratio = ratio self.device = device self.name = name self.parse_args(**kwargs) # 恶意软件检测模型 if md_nn_model is not None and isinstance(md_nn_model, nn.Module): # 如果提供了已经训练好的模型,就使用这个模型 self.md_nn_model = md_nn_model self.is_fitting_md_model = False # 默认情况下,模型已经被训练 else: # 否则,创建一个新的恶意软件检测模型
self.md_nn_model = MalwareDetectionDNN(self.input_size,
2
2023-11-27 02:00:23+00:00
24k
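This record pairs a code prefix with the single line expected to follow it (shown above as `self.md_nn_model = MalwareDetectionDNN(self.input_size,`). A minimal sketch of how such a completion could be scored, assuming a whitespace-insensitive exact match (an illustrative choice, not the dataset's official metric):

```python
# Illustrative only: whitespace-normalized exact match between a model
# completion and the expected continuation line of the record.
def exact_match(predicted: str, expected: str) -> bool:
    return " ".join(predicted.split()) == " ".join(expected.split())

expected = "self.md_nn_model = MalwareDetectionDNN(self.input_size,"
predicted = "self.md_nn_model =  MalwareDetectionDNN(self.input_size,  "
print(exact_match(predicted, expected))  # True: differs only in spacing
```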
iann838/pulsefire
tests/test_taskgroups.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ |\n | Legends of Runeterra | ✅ |\n | Teamfight Tactics | ✅ |\n | Valorant | ✅ |\n\n Example:\n ```python\n async with RiotAPIClient(\n default_headers={\"X-Riot-Token\": <API_KEY>}\n ) as client:\n summoner = await client.get_lol_summoner_v4_by_name(region=\"na1\", name=\"Not a Whale\")\n assert summoner[\"summonerLevel\"] > 200\n ```\n \"\"\"\n\n Region = Literal[\n \"americas\", \"europe\", \"asia\", \"sea\", \"esports\",\n \"br1\", \"eun1\", \"euw1\", \"jp1\", \"kr\", \"la1\", \"la2\",\n \"na1\", \"oc1\", \"tr1\", \"ru\", \"ph2\", \"sg2\", \"th2\", \"tw2\", \"vn2\",\n \"ap\", \"br\", \"eu\", \"kr\", \"latam\", \"na\",\n ] | _str\n\n def __init__(\n self,\n *,\n base_url: str = \"https://{region}.api.riotgames.com\",\n default_params: dict[str, Any] = {},\n default_headers: dict[str, str] = {\"X-Riot-Token\": \"\"},\n default_queries: dict[str, str] = {},\n middlewares: list[Middleware] = [\n json_response_middleware(),\n http_error_middleware(),\n rate_limiter_middleware(RiotAPIRateLimiter()),\n ],\n ) -> None:\n super().__init__(\n base_url=base_url,\n default_params=default_params,\n default_headers=default_headers,\n default_queries=default_queries,\n middlewares=middlewares\n )\n\n # Account Endpoints\n\n async def get_account_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-puuid/{puuid}\")\n\n async def get_account_v1_by_riot_id(self, *, region: Region = ..., game_name: str = ..., tag_line: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}\")\n\n async def get_account_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/me\")\n\n async def get_account_v1_active_shard_by_puuid(self, *, region: Region = ..., puuid: str = ..., game: str = ...) -> RiotAPISchema.AccountV1ActiveShard:\n return await self.invoke(\"GET\", \"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}\")\n\n # League of Legends Endpoints\n\n async def get_lol_champion_v3_rotation(self, *, region: Region = ...) -> RiotAPISchema.LolChampionV3Rotation:\n return await self.invoke(\"GET\", \"/lol/platform/v3/champion-rotations\")\n\n async def get_lol_champion_v4_mastery_by_summoner(self, *, region: Region = ..., summoner_id: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_top_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_mastery_by_puuid(self, *, region: Region = ..., puuid: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}\")\n\n async def get_lol_champion_v4_top_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_players_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-summoner/{summoner_id}\")\n\n async def get_lol_clash_v1_players_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_team(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Team:\n return await self.invoke(\"GET\", \"/lol/clash/v1/teams/{id}\")\n\n async def get_lol_clash_v1_tournament_by_team(self, *, region: Region = ..., team_id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/by-team/{team_id}\")\n\n async def get_lol_clash_v1_tournament(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/{id}\")\n\n async def get_lol_clash_v1_tournaments(self, *, region: Region = ...) -> list[RiotAPISchema.LolClashV1Tournament]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments\")\n\n async def get_lol_league_v4_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/by-summoner/{summoner_id}\")\n\n async def get_lol_league_v4_challenger_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/challengerleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_grandmaster_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/grandmasterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_master_league_by_queue(self, *, region: Region = ..., queue: str = ...) 
-> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/masterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_entries_by_division(\n self, *, region: Region = ..., queue: str = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/{queue}/{tier}/{division}\")\n\n async def get_lol_league_v4_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/leagues/{id}\")\n\n async def get_lol_match_v5_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5Match:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}\")\n\n async def get_lol_match_v5_match_timeline(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5MatchTimeline:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}/timeline\")\n\n async def get_lol_match_v5_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/by-puuid/{puuid}/ids\")\n\n async def get_lol_spectator_v4_active_game_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> RiotAPISchema.LolSpectatorV4Game:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/active-games/by-summoner/{summoner_id}\")\n\n async def get_lol_spectator_v4_featured_games(self, *, region: Region = ...) -> RiotAPISchema.LolSpectatorV4GameList:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/featured-games\")\n\n async def get_lol_status_v4_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lol/status/v4/platform-data\")\n\n async def get_lol_summoner_v4_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/{id}\")\n\n async def get_lol_summoner_v4_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-name/{name}\")\n\n async def get_lol_summoner_v4_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-puuid/{puuid}\")\n\n async def get_lol_summoner_v4_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/me\")\n\n async def get_lol_summoner_v4_by_rso_puuid(self, *, region: Region = ..., rso_puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/fulfillment/v1/summoners/by-puuid/{rso_puuid}\")\n\n # Teamfight Tactics Endpoints\n\n async def get_tft_league_v1_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/by-summoner/{summoner_id}\")\n\n async def get_tft_league_v1_challenger_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/challenger\")\n\n async def get_tft_league_v1_grandmaster_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/grandmaster\")\n\n async def get_tft_league_v1_master_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/master\")\n\n async def get_tft_league_v1_entries_by_division(\n self, *, region: Region = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/{tier}/{division}\")\n\n async def get_tft_league_v1_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/leagues/{id}\")\n\n async def get_tft_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftMatchV1Match:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/{id}\")\n\n async def get_tft_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_tft_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/tft/status/v1/platform-data\")\n\n async def get_tft_summoner_v1_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/{id}\")\n\n async def get_tft_summoner_v1_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-name/{name}\")\n\n async def get_tft_summoner_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-puuid/{puuid}\")\n\n async def get_tft_summoner_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/me\")\n\n # Legends of Runeterra Endpoints\n\n async def get_lor_ranked_v1_leaderboard(self, *, region: Region = ...) -> RiotAPISchema.LorRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/lor/ranked/v1/leaderboards\")\n\n async def get_lor_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LorMatchV1Match:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/{id}\")\n\n async def get_lor_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[str]:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_lor_status_v1_platform_data(self, *, region: Region = ...) 
-> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lor/status/v1/platform-data\")\n\n # Valorant Endpoints\n\n async def get_val_content_v1_contents(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.ValContentV1Contents:\n return await self.invoke(\"GET\", \"/val/content/v1/contents\")\n\n async def get_val_ranked_v1_leaderboard_by_act(self, *, region: Region = ..., act_id: str = ...) -> RiotAPISchema.ValRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/val/ranked/v1/leaderboards/by-act/{act_id}\")\n\n async def get_val_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.ValMatchV1Match:\n return await self.invoke(\"GET\", \"/val/match/v1/matches/{id}\")\n\n async def get_val_match_v1_matchlist_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.ValMatchV1Matchlist:\n return await self.invoke(\"GET\", \"/val/match/v1/matchlists/by-puuid/{puuid}\")\n\n async def get_val_match_v1_recent_matches_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.ValMatchV1RecentMatches:\n return await self.invoke(\"GET\", \"/val/match/v1/recent-matches/by-queue/{queue}\")\n\n async def get_val_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/val/status/v1/platform-data\")" }, { "identifier": "async_to_sync", "path": "pulsefire/functools.py", "snippet": "def async_to_sync(runner: Callable[[Awaitable[Any]], Any] = asyncio.run):\n \"\"\"Convert a coroutine function to run synchronously. Use as decorator `@async_to_sync()`.\n\n Example:\n ```python\n @async_to_sync()\n async def sample_func(number: int):\n ...\n \n sample_func(0)\n ```\n\n Parameters:\n runner: A callable that runs the awaitable synchronously.\n\n Raises:\n TypeError: When `func` is not a coroutine function.\n \"\"\"\n\n def decorator[**P, R](func: Callable[P, Awaitable[R]]) -> Callable[P, R]:\n if not inspect.iscoroutinefunction(func):\n raise TypeError(f\"{func} is not a coroutine function\")\n\n @functools.wraps(func)\n def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n return runner(func(*args, **kwargs))\n\n return wrapper\n\n return decorator" }, { "identifier": "RiotAPISchema", "path": "pulsefire/schemas.py", "snippet": "class RiotAPISchema:\n\n # Account Types\n\n AccountV1Account = TypedDict(\"AccountV1Account\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n })\n AccountV1ActiveShard = TypedDict(\"AccountV1ActiveShard\", {\n \"puuid\": str,\n \"game\": str,\n \"activeShard\": str,\n })\n\n # League of Legends Types\n\n LolChampionV3Rotation = TypedDict(\"LolChampionV3Rotation\", {\n \"freeChampionIds\": list[int],\n \"freeChampionIdsForNewPlayers\": list[int],\n \"maxNewPlayerLevel\": int\n })\n LolChampionV4Mastery = TypedDict(\"LolChampionV4Mastery\", {\n \"puuid\": str,\n \"championId\": int,\n \"championLevel\": int,\n \"championPoints\": int,\n \"lastPlayTime\": int,\n \"championPointsSinceLastLevel\": int,\n \"championPointsUntilNextLevel\": int,\n \"chestGranted\": bool,\n \"tokensEarned\": int,\n \"summonerId\": str\n })\n LolClashV1Player = TypedDict(\"LolClashV1Player\", {\n \"summonerId\": str,\n \"teamId\": str,\n \"position\": str,\n \"role\": str,\n })\n LolClashV1Team = TypedDict(\"LolClashV1Team\", {\n \"id\": str,\n \"tournamentId\": int,\n \"name\": str,\n \"iconId\": int,\n \"tier\": int,\n \"captain\": str,\n \"abbreviation\": str,\n \"players\": list[LolClashV1Player],\n })\n 
LolClashV1TournamentSchedule = TypedDict(\"LolClashV1TournamentSchedule\", {\n \"id\": int,\n \"registrationTime\": int,\n \"startTime\": int,\n \"cancelled\": bool,\n })\n LolClashV1Tournament = TypedDict(\"LolClashV1Tournament\", {\n \"id\": int,\n \"themeId\": int,\n \"nameKey\": str,\n \"nameKeySecondary\": str,\n \"schedule\": list[LolClashV1TournamentSchedule]\n })\n LolLeagueV4LeagueEntryMiniSeries = TypedDict(\"LolLeagueV4LeagueEntryMiniSeries\", {\n \"losses\": int,\n \"progress\": str,\n \"target\": int,\n \"wins\": int,\n })\n LolLeagueV4LeagueEntry = TypedDict(\"LolLeagueV4LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n LolLeagueV4LeagueFullEntry = TypedDict(\"LolLeagueV4LeagueFullEntry\", {\n \"leagueId\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n LolLeagueV4League = TypedDict(\"LolLeagueV4League\", {\n \"tier\": str,\n \"leagueId\": str,\n \"queue\": str,\n \"name\": str,\n \"entries\": list[LolLeagueV4LeagueEntry]\n })\n LolMatchV5MatchMetadata = TypedDict(\"LolMatchV5MatchMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTeamObjective = TypedDict(\"LolMatchV5MatchTeamObjective\", {\n \"first\": bool,\n \"kills\": int\n })\n LolMatchV5MatchInfoParticipantChallenges = TypedDict(\"LolMatchV5MatchInfoParticipantChallenges\", {\n \"12AssistStreakCount\": int,\n \"abilityUses\": int,\n \"acesBefore15Minutes\": int,\n \"alliedJungleMonsterKills\": int,\n \"baronTakedowns\": int,\n \"blastConeOppositeOpponentCount\": int,\n \"bountyGold\": int,\n \"buffsStolen\": int,\n \"completeSupportQuestInTime\": int,\n \"controlWardTimeCoverageInRiverOrEnemyHalf\": NotRequired[float],\n \"controlWardsPlaced\": int,\n \"damagePerMinute\": float,\n \"damageTakenOnTeamPercentage\": float,\n \"dancedWithRiftHerald\": int,\n \"deathsByEnemyChamps\": int,\n \"dodgeSkillShotsSmallWindow\": int,\n \"doubleAces\": int,\n \"dragonTakedowns\": int,\n \"earliestBaron\": float,\n \"earlyLaningPhaseGoldExpAdvantage\": int,\n \"effectiveHealAndShielding\": float,\n \"elderDragonKillsWithOpposingSoul\": int,\n \"elderDragonMultikills\": int,\n \"enemyChampionImmobilizations\": int,\n \"enemyJungleMonsterKills\": int,\n \"epicMonsterKillsNearEnemyJungler\": int,\n \"epicMonsterKillsWithin30SecondsOfSpawn\": int,\n \"epicMonsterSteals\": int,\n \"epicMonsterStolenWithoutSmite\": int,\n \"firstTurretKilled\": int,\n \"firstTurretKilledTime\": NotRequired[float],\n \"flawlessAces\": int,\n \"fullTeamTakedown\": int,\n \"gameLength\": float,\n \"getTakedownsInAllLanesEarlyJungleAsLaner\": NotRequired[int],\n \"goldPerMinute\": float,\n \"hadOpenNexus\": int,\n \"immobilizeAndKillWithAlly\": int,\n \"initialBuffCount\": int,\n \"initialCrabCount\": int,\n \"jungleCsBefore10Minutes\": float,\n \"junglerTakedownsNearDamagedEpicMonster\": int,\n \"kTurretsDestroyedBeforePlatesFall\": int,\n \"kda\": float,\n \"killAfterHiddenWithAlly\": int,\n \"killParticipation\": float,\n \"killedChampTookFullTeamDamageSurvived\": int,\n \"killingSprees\": int,\n \"killsNearEnemyTurret\": int,\n 
\"killsOnOtherLanesEarlyJungleAsLaner\": NotRequired[int],\n \"killsOnRecentlyHealedByAramPack\": int,\n \"killsUnderOwnTurret\": int,\n \"killsWithHelpFromEpicMonster\": int,\n \"knockEnemyIntoTeamAndKill\": int,\n \"landSkillShotsEarlyGame\": int,\n \"laneMinionsFirst10Minutes\": int,\n \"laningPhaseGoldExpAdvantage\": int,\n \"legendaryCount\": int,\n \"lostAnInhibitor\": int,\n \"maxCsAdvantageOnLaneOpponent\": float,\n \"maxKillDeficit\": int,\n \"maxLevelLeadLaneOpponent\": int,\n \"mejaisFullStackInTime\": int,\n \"moreEnemyJungleThanOpponent\": float,\n \"multiKillOneSpell\": int,\n \"multiTurretRiftHeraldCount\": int,\n \"multikills\": int,\n \"multikillsAfterAggressiveFlash\": int,\n \"mythicItemUsed\": NotRequired[int],\n \"outerTurretExecutesBefore10Minutes\": int,\n \"outnumberedKills\": int,\n \"outnumberedNexusKill\": int,\n \"perfectDragonSoulsTaken\": int,\n \"perfectGame\": int,\n \"pickKillWithAlly\": int,\n \"playedChampSelectPosition\": NotRequired[int],\n \"poroExplosions\": int,\n \"quickCleanse\": int,\n \"quickFirstTurret\": int,\n \"quickSoloKills\": int,\n \"riftHeraldTakedowns\": int,\n \"saveAllyFromDeath\": int,\n \"scuttleCrabKills\": int,\n \"shortestTimeToAceFromFirstTakedown\": NotRequired[float],\n \"skillshotsDodged\": int,\n \"skillshotsHit\": int,\n \"snowballsHit\": int,\n \"soloBaronKills\": int,\n \"soloKills\": int,\n \"stealthWardsPlaced\": int,\n \"survivedSingleDigitHpCount\": int,\n \"survivedThreeImmobilizesInFight\": int,\n \"takedownOnFirstTurret\": int,\n \"takedowns\": int,\n \"takedownsAfterGainingLevelAdvantage\": int,\n \"takedownsBeforeJungleMinionSpawn\": int,\n \"takedownsFirstXMinutes\": int,\n \"takedownsInAlcove\": int,\n \"takedownsInEnemyFountain\": int,\n \"teamBaronKills\": int,\n \"teamDamagePercentage\": float,\n \"teamElderDragonKills\": int,\n \"teamRiftHeraldKills\": int,\n \"tookLargeDamageSurvived\": int,\n \"turretPlatesTaken\": int,\n \"turretTakedowns\": int,\n \"turretsTakenWithRiftHerald\": int,\n \"twentyMinionsIn3SecondsCount\": int,\n \"twoWardsOneSweeperCount\": int,\n \"unseenRecalls\": int,\n \"visionScoreAdvantageLaneOpponent\": float,\n \"visionScorePerMinute\": float,\n \"wardTakedowns\": int,\n \"wardTakedownsBefore20M\": int,\n \"wardsGuarded\": int,\n \"earliestDragonTakedown\": NotRequired[float],\n \"baronBuffGoldAdvantageOverThreshold\": NotRequired[int],\n \"teleportTakedowns\": NotRequired[int],\n \"fastestLegendary\": NotRequired[float],\n \"highestChampionDamage\": NotRequired[int],\n \"highestCrowdControlScore\": NotRequired[int],\n \"junglerKillsEarlyJungle\": NotRequired[int],\n \"killsOnLanersEarlyJungleAsJungler\": NotRequired[int],\n \"fasterSupportQuestCompletion\": NotRequired[int],\n \"highestWardKills\": NotRequired[int],\n \"soloTurretsLategame\": NotRequired[int],\n \"thirdInhibitorDestroyedTime\": NotRequired[float],\n }, total=False) | dict[str, int | float]\n LolMatchV5MatchInfoParticipantPerksStatPerks = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStatPerks\", {\n \"defense\": int,\n \"flex\": int,\n \"offense\": int\n })\n LolMatchV5MatchInfoParticipantPerksStyleSelection = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyleSelection\", {\n \"perk\": int,\n \"var1\": int,\n \"var2\": int,\n \"var3\": int}\n )\n LolMatchV5MatchInfoParticipantPerksStyle = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyle\", {\n \"description\": str,\n \"selections\": list[LolMatchV5MatchInfoParticipantPerksStyleSelection],\n \"style\": int\n })\n LolMatchV5MatchInfoParticipantPerks = 
TypedDict(\"LolMatchV5MatchInfoParticipantPerks\", {\n \"statPerks\": LolMatchV5MatchInfoParticipantPerksStatPerks,\n \"styles\": list[LolMatchV5MatchInfoParticipantPerksStyle]\n })\n LolMatchV5MatchInfoParticipantMissions = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"playerScore0\": float,\n \"playerScore1\": float,\n \"playerScore10\": float,\n \"playerScore11\": float,\n \"playerScore2\": float,\n \"playerScore3\": float,\n \"playerScore4\": float,\n \"playerScore5\": float,\n \"playerScore6\": float,\n \"playerScore7\": float,\n \"playerScore8\": float,\n \"playerScore9\": float,\n })\n LolMatchV5MatchInfoParticipant = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"allInPings\": int,\n \"assistMePings\": int,\n \"assists\": int,\n \"baitPings\": int,\n \"baronKills\": int,\n \"basicPings\": int,\n \"bountyLevel\": int,\n \"challenges\": NotRequired[LolMatchV5MatchInfoParticipantChallenges],\n \"champExperience\": int,\n \"champLevel\": int,\n \"championId\": int,\n \"championName\": str,\n \"championTransform\": int,\n \"commandPings\": int,\n \"consumablesPurchased\": int,\n \"damageDealtToBuildings\": int,\n \"damageDealtToObjectives\": int,\n \"damageDealtToTurrets\": int,\n \"damageSelfMitigated\": int,\n \"dangerPings\": int,\n \"deaths\": int,\n \"detectorWardsPlaced\": int,\n \"doubleKills\": int,\n \"dragonKills\": int,\n \"eligibleForProgression\": bool,\n \"enemyMissingPings\": int,\n \"enemyVisionPings\": int,\n \"firstBloodAssist\": bool,\n \"firstBloodKill\": bool,\n \"firstTowerAssist\": bool,\n \"firstTowerKill\": bool,\n \"gameEndedInEarlySurrender\": bool,\n \"gameEndedInSurrender\": bool,\n \"getBackPings\": int,\n \"goldEarned\": int,\n \"goldSpent\": int,\n \"holdPings\": int,\n \"individualPosition\": str,\n \"inhibitorKills\": int,\n \"inhibitorTakedowns\": int,\n \"inhibitorsLost\": int,\n \"item0\": int,\n \"item1\": int,\n \"item2\": int,\n \"item3\": int,\n \"item4\": int,\n \"item5\": int,\n \"item6\": int,\n \"itemsPurchased\": int,\n \"killingSprees\": int,\n \"kills\": int,\n \"lane\": str,\n \"largestCriticalStrike\": int,\n \"largestKillingSpree\": int,\n \"largestMultiKill\": int,\n \"longestTimeSpentLiving\": int,\n \"magicDamageDealt\": int,\n \"magicDamageDealtToChampions\": int,\n \"magicDamageTaken\": int,\n \"missions\": NotRequired[LolMatchV5MatchInfoParticipantMissions],\n \"needVisionPings\": int,\n \"neutralMinionsKilled\": int,\n \"nexusKills\": int,\n \"nexusLost\": int,\n \"nexusTakedowns\": int,\n \"objectivesStolen\": int,\n \"objectivesStolenAssists\": int,\n \"onMyWayPings\": int,\n \"participantId\": int,\n \"pentaKills\": int,\n \"perks\": LolMatchV5MatchInfoParticipantPerks,\n \"physicalDamageDealt\": int,\n \"physicalDamageDealtToChampions\": int,\n \"physicalDamageTaken\": int,\n \"placement\": int,\n \"playerAugment1\": int,\n \"playerAugment2\": int,\n \"playerAugment3\": int,\n \"playerAugment4\": int,\n \"playerSubteamId\": int,\n \"playerScore0\": NotRequired[float],\n \"playerScore1\": NotRequired[float],\n \"playerScore10\": NotRequired[float],\n \"playerScore11\": NotRequired[float],\n \"playerScore2\": NotRequired[float],\n \"playerScore3\": NotRequired[float],\n \"playerScore4\": NotRequired[float],\n \"playerScore5\": NotRequired[float],\n \"playerScore6\": NotRequired[float],\n \"playerScore7\": NotRequired[float],\n \"playerScore8\": NotRequired[float],\n \"playerScore9\": NotRequired[float],\n \"profileIcon\": int,\n \"pushPings\": int,\n \"puuid\": str,\n \"quadraKills\": int,\n \"riotIdName\": 
NotRequired[str],\n \"riotIdTagline\": str,\n \"riotIdGameName\": NotRequired[str],\n \"role\": str,\n \"sightWardsBoughtInGame\": int,\n \"spell1Casts\": int,\n \"spell2Casts\": int,\n \"spell3Casts\": int,\n \"spell4Casts\": int,\n \"subteamPlacement\": int,\n \"summoner1Casts\": int,\n \"summoner1Id\": int,\n \"summoner2Casts\": int,\n \"summoner2Id\": int,\n \"summonerId\": str,\n \"summonerLevel\": int,\n \"summonerName\": str,\n \"teamEarlySurrendered\": bool,\n \"teamId\": int,\n \"teamPosition\": str,\n \"timeCCingOthers\": int,\n \"timePlayed\": int,\n \"totalAllyJungleMinionsKilled\": int,\n \"totalDamageDealt\": int,\n \"totalDamageDealtToChampions\": int,\n \"totalDamageShieldedOnTeammates\": int,\n \"totalDamageTaken\": int,\n \"totalEnemyJungleMinionsKilled\": int,\n \"totalHeal\": int,\n \"totalHealsOnTeammates\": int,\n \"totalMinionsKilled\": int,\n \"totalTimeCCDealt\": int,\n \"totalTimeSpentDead\": int,\n \"totalUnitsHealed\": int,\n \"tripleKills\": int,\n \"trueDamageDealt\": int,\n \"trueDamageDealtToChampions\": int,\n \"trueDamageTaken\": int,\n \"turretKills\": int,\n \"turretTakedowns\": int,\n \"turretsLost\": int,\n \"unrealKills\": int,\n \"visionClearedPings\": int,\n \"visionScore\": int,\n \"visionWardsBoughtInGame\": int,\n \"wardsKilled\": int,\n \"wardsPlaced\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfoTeamBan = TypedDict(\"LolMatchV5MatchInfoTeamBan\", {\n \"championId\": int,\n \"pickTurn\": int\n })\n LolMatchV5MatchInfoTeamObjectives = TypedDict(\"LolMatchV5MatchInfoTeamObjectives\", {\n \"baron\": LolMatchV5MatchTeamObjective,\n \"champion\": LolMatchV5MatchTeamObjective,\n \"dragon\": LolMatchV5MatchTeamObjective,\n \"horde\": NotRequired[LolMatchV5MatchTeamObjective],\n \"inhibitor\": LolMatchV5MatchTeamObjective,\n \"riftHerald\": LolMatchV5MatchTeamObjective,\n \"tower\": LolMatchV5MatchTeamObjective\n })\n LolMatchV5MatchInfoTeam = TypedDict(\"LolMatchV5MatchInfoTeam\", {\n \"bans\": list[LolMatchV5MatchInfoTeamBan],\n \"objectives\": LolMatchV5MatchInfoTeamObjectives,\n \"teamId\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfo = TypedDict(\"LolMatchV5MatchInfo\", {\n \"gameCreation\": int,\n \"gameDuration\": int,\n \"gameEndTimestamp\": int,\n \"gameId\": int,\n \"gameMode\": str,\n \"gameName\": str,\n \"gameStartTimestamp\": int,\n \"gameType\": str,\n \"gameVersion\": str,\n \"mapId\": int,\n \"participants\": list[LolMatchV5MatchInfoParticipant],\n \"platformId\": str,\n \"queueId\": int,\n \"teams\": list[LolMatchV5MatchInfoTeam],\n \"tournamentCode\": str\n })\n LolMatchV5Match = TypedDict(\"LolMatchV5Match\", {\n \"metadata\": LolMatchV5MatchMetadata,\n \"info\": LolMatchV5MatchInfo\n })\n LolMatchV5MatchTimelineParticipantFrameChampionStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameChampionStats\", {\n \"abilityHaste\": int,\n \"abilityPower\": int,\n \"armor\": int,\n \"armorPen\": int,\n \"armorPenPercent\": int,\n \"attackDamage\": int,\n \"attackSpeed\": int,\n \"bonusArmorPenPercent\": int,\n \"bonusMagicPenPercent\": int,\n \"ccReduction\": int,\n \"cooldownReduction\": int,\n \"health\": int,\n \"healthMax\": int,\n \"healthRegen\": int,\n \"lifesteal\": int,\n \"magicPen\": int,\n \"magicPenPercent\": int,\n \"magicResist\": int,\n \"movementSpeed\": int,\n \"omnivamp\": int,\n \"physicalVamp\": int,\n \"power\": int,\n \"powerMax\": int,\n \"powerRegen\": int,\n \"spellVamp\": int\n })\n LolMatchV5MatchTimelineParticipantFrameDamageStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameDamageStats\", 
{\n \"magicDamageDone\": int,\n \"magicDamageDoneToChampions\": int,\n \"magicDamageTaken\": int,\n \"physicalDamageDone\": int,\n \"physicalDamageDoneToChampions\": int,\n \"physicalDamageTaken\": int,\n \"totalDamageDone\": int,\n \"totalDamageDoneToChampions\": int,\n \"totalDamageTaken\": int,\n \"trueDamageDone\": int,\n \"trueDamageDoneToChampions\": int,\n \"trueDamageTaken\": int\n })\n LolMatchV5MatchTimelinePosition = TypedDict(\"LolMatchV5MatchTimelinePosition\", {\n \"x\": int,\n \"y\": int\n })\n LolMatchV5MatchTimelineParticipantFrame = TypedDict(\"LolMatchV5MatchTimelineParticipantFrame\", {\n \"championStats\": LolMatchV5MatchTimelineParticipantFrameChampionStats,\n \"currentGold\": int,\n \"damageStats\": LolMatchV5MatchTimelineParticipantFrameDamageStats,\n \"goldPerSecond\": int,\n \"jungleMinionsKilled\": int,\n \"level\": int,\n \"minionsKilled\": int,\n \"participantId\": int,\n \"position\": LolMatchV5MatchTimelinePosition,\n \"timeEnemySpentControlled\": int,\n \"totalGold\": int,\n \"xp\": int\n })\n LolMatchV5MatchTimelineEventDamage = TypedDict(\"LolMatchV5MatchTimelineEventDamage\", {\n \"basic\": bool,\n \"magicDamage\": int,\n \"name\": str,\n \"participantId\": int,\n \"physicalDamage\": int,\n \"spellName\": str,\n \"spellSlot\": int,\n \"trueDamage\": int,\n \"type\": str\n })\n LolMatchV5MatchTimelineMetadata = TypedDict(\"LolMatchV5MatchTimelineMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTimelineInfoFrameEvent = TypedDict(\"LolMatchV5MatchTimelineInfoFrameEvent\", {\n \"afterId\": NotRequired[int],\n \"beforeId\": NotRequired[int],\n \"goldGain\": NotRequired[int],\n \"participantId\": NotRequired[int],\n \"timestamp\": int,\n \"type\": str,\n \"creatorId\": NotRequired[int],\n \"wardType\": NotRequired[str],\n \"level\": NotRequired[int],\n \"itemId\": NotRequired[int],\n \"assistingParticipantIds\": NotRequired[list[int]],\n \"bounty\": NotRequired[int],\n \"killStreakLength\": NotRequired[int],\n \"killerId\": NotRequired[int],\n \"position\": NotRequired[LolMatchV5MatchTimelinePosition],\n \"shutdownBounty\": NotRequired[int],\n \"victimDamageDealt\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimDamageReceived\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimId\": NotRequired[int],\n \"levelUpType\": NotRequired[str],\n \"skillSlot\": NotRequired[int],\n \"realTimestamp\": NotRequired[int],\n })\n LolMatchV5MatchTimelineInfoFrame = TypedDict(\"LolMatchV5MatchTimelineInfoFrame\", {\n \"events\": list[LolMatchV5MatchTimelineInfoFrameEvent],\n \"participantFrames\": dict[str, LolMatchV5MatchTimelineParticipantFrame],\n \"timestamp\": int\n })\n LolMatchV5MatchTimelineInfoParticipants = TypedDict(\"LolMatchV5MatchTimelineInfoParticipants\", {\n \"participantId\": int,\n \"puuid\": str,\n })\n LolMatchV5MatchTimelineInfo = TypedDict(\"LolMatchV5MatchTimelineInfo\", {\n \"frameInterval\": int,\n \"frames\": list[LolMatchV5MatchTimelineInfoFrame],\n \"gameId\": int,\n \"participants\": list[LolMatchV5MatchTimelineInfoParticipants]\n })\n LolMatchV5MatchTimeline = TypedDict(\"LolMatchV5MatchTimeline\", {\n \"metadata\": LolMatchV5MatchTimelineMetadata,\n \"info\": LolMatchV5MatchTimelineInfo\n })\n LolSpectatorV4GameParticipantPerks = TypedDict(\"LolSpectatorV4GameParticipantPerks\", {\n \"perkIds\": list[int],\n \"perkStyle\": int,\n \"perkSubStyle\": int\n })\n LolSpectatorV4GameParticipant = TypedDict(\"LolSpectatorV4GameParticipant\", {\n 
\"gameCustomizationObjects\": NotRequired[list[str]],\n \"perks\": NotRequired[LolSpectatorV4GameParticipantPerks],\n \"puuid\": str,\n \"summonerId\": str,\n \"teamId\": int,\n \"spell1Id\": int,\n \"spell2Id\": int,\n \"championId\": int,\n \"profileIconId\": int,\n \"summonerName\": str,\n \"bot\": bool\n })\n LolSpectatorV4GameObservers = TypedDict(\"LolSpectatorV4GameObservers\", {\n \"encryptionKey\": str\n })\n LolSpectatorV4Game = TypedDict(\"LolSpectatorV4Game\", {\n \"gameId\": int,\n \"mapId\": int,\n \"gameMode\": str,\n \"gameType\": str,\n \"gameQueueConfigId\": int,\n \"participants\": list[LolSpectatorV4GameParticipant],\n \"observers\": LolSpectatorV4GameObservers,\n \"platformId\": str,\n \"bannedChampions\": list[int],\n \"gameStartTime\": int,\n \"gameLength\": int\n })\n LolSpectatorV4GameList = TypedDict(\"LolSpectatorV4GameList\", {\n \"gameList\": list[LolSpectatorV4Game],\n \"clientRefreshInterval\": int\n })\n LolSummonerV4Summoner = TypedDict(\"SummonerV4Summoner\", {\n \"id\": str,\n \"accountId\": str,\n \"puuid\": str,\n \"name\": str,\n \"profileIconId\": int,\n \"revisionDate\": int,\n \"summonerLevel\": int\n })\n\n # Teamfight Tactics Types\n\n TftLeagueV1LeagueEntry = TypedDict(\"TftLeagueV1LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n TftLeagueV1LeagueFullEntry = TypedDict(\"TftLeagueV1LeagueFullEntry\", {\n \"leagueId\": str,\n \"puuid\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n TftLeagueV1League = TypedDict(\"TftLeagueV1League\", {\n \"tier\": str,\n \"leagueId\": NotRequired[str],\n \"queue\": NotRequired[str],\n \"name\": NotRequired[str],\n \"entries\": list[TftLeagueV1LeagueEntry]\n })\n TftMatchV1MatchMetadata = TypedDict(\"TftMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n TftMatchV1MatchInfoParticipantCompanion = TypedDict(\"TftMatchV1MatchInfoParticipantCompanion\", {\n \"content_ID\": str,\n \"item_ID\": int,\n \"skin_ID\": int,\n \"species\": str\n })\n TftMatchV1MatchInfoParticipantTrait = TypedDict(\"TftMatchV1MatchInfoParticipantTrait\", {\n \"name\": str,\n \"num_units\": int,\n \"style\": int,\n \"tier_current\": int,\n \"tier_total\": int\n })\n TftMatchV1MatchInfoParticipantUnit = TypedDict(\"TftMatchV1MatchInfoParticipantUnit\", {\n \"character_id\": str,\n \"itemNames\": list[str],\n \"name\": str,\n \"rarity\": int,\n \"tier\": int\n })\n TftMatchV1MatchInfoParticipant = TypedDict(\"TftMatchV1MatchInfoParticipant\", {\n \"augments\": list[str],\n \"companion\": TftMatchV1MatchInfoParticipantCompanion,\n \"gold_left\": int,\n \"last_round\": int,\n \"level\": int,\n \"placement\": int,\n \"players_eliminated\": int,\n \"puuid\": str,\n \"time_eliminated\": float,\n \"total_damage_to_players\": int,\n \"traits\": list[TftMatchV1MatchInfoParticipantTrait],\n \"units\": list[TftMatchV1MatchInfoParticipantUnit]\n })\n TftMatchV1MatchInfo = TypedDict(\"TftMatchV1MatchInfo\", {\n \"game_datetime\": int,\n \"game_length\": float,\n \"game_version\": str,\n \"participants\": 
list[TftMatchV1MatchInfoParticipant],\n \"queue_id\": int,\n \"tft_game_type\": str,\n \"tft_set_core_name\": str,\n \"tft_set_number\": int\n })\n TftMatchV1Match = TypedDict(\"TftMatchV1Match\", {\n \"metadata\": TftMatchV1MatchMetadata,\n \"info\": TftMatchV1MatchInfo\n })\n TftSummonerV1Summoner = LolSummonerV4Summoner\n\n # Legends of Runeterra Types\n\n LorRankedV1LeaderboardPlayer = TypedDict(\"LorRankedV1LeaderboardPlayer\", {\n \"name\": str,\n \"rank\": int,\n \"lp\": float\n })\n LorRankedV1Leaderboard = TypedDict(\"LorRankedV1Leaderboard\", {\n \"players\": list[LorRankedV1LeaderboardPlayer]\n })\n LorMatchV1MatchMetadata = TypedDict(\"LorMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n LorMatchV1MatchInfoPlayer = TypedDict(\"LorMatchV1MatchInfoPlayer\", {\n \"puuid\": str,\n \"deck_id\": str,\n \"deck_code\": str,\n \"factions\": list[str],\n \"game_outcome\": str,\n \"order_of_play\": int\n })\n LorMatchV1MatchInfo = TypedDict(\"LorMatchV1MatchInfo\", {\n \"game_mode\": str,\n \"game_type\": str,\n \"game_start_time_utc\": str,\n \"game_version\": str,\n \"players\": list[LorMatchV1MatchInfoPlayer],\n \"total_turn_count\": int\n })\n LorMatchV1Match = TypedDict(\"LorMatchV1Match\", {\n \"metadata\": LorMatchV1MatchMetadata,\n \"info\": LorMatchV1MatchInfo\n })\n\n # Valorant Types\n\n ValContentV1ContentsAssetLocalizedNames = TypedDict(\"ValContentV1ContentsAssetLocalizedNames\", {\n \"ar-AE\": str,\n \"de-DE\": str,\n \"en-US\": str,\n \"es-ES\": str,\n \"es-MX\": str,\n \"fr-FR\": str,\n \"id-ID\": str,\n \"it-IT\": str,\n \"ja-JP\": str,\n \"ko-KR\": str,\n \"pl-PL\": str,\n \"pt-BR\": str,\n \"ru-RU\": str,\n \"th-TH\": str,\n \"tr-TR\": str,\n \"vi-VN\": str,\n \"zh-CN\": str,\n \"zh-TW\": str,\n })\n ValContentV1ContentsAsset = TypedDict(\"ValContentV1ContentsAsset\", {\n \"name\": str,\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"assetName\": str,\n \"assetPath\": NotRequired[str]\n })\n ValContentV1ContentsAct = TypedDict(\"ValContentV1ContentsAct\", {\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"parentId\": str,\n \"type\": str,\n \"name\": str,\n \"isActive\": bool\n })\n ValContentV1Contents = TypedDict(\"ValContentV1Contents\", {\n \"version\": str,\n \"characters\": list[ValContentV1ContentsAsset],\n \"maps\": list[ValContentV1ContentsAsset],\n \"chromas\": list[ValContentV1ContentsAsset],\n \"skins\": list[ValContentV1ContentsAsset],\n \"skinLevels\": list[ValContentV1ContentsAsset],\n \"equips\": list[ValContentV1ContentsAsset],\n \"gameModes\": list[ValContentV1ContentsAsset],\n \"totems\": list[ValContentV1ContentsAsset],\n \"sprays\": list[ValContentV1ContentsAsset],\n \"sprayLevels\": list[ValContentV1ContentsAsset],\n \"charms\": list[ValContentV1ContentsAsset],\n \"charmLevels\": list[ValContentV1ContentsAsset],\n \"playerCards\": list[ValContentV1ContentsAsset],\n \"playerTitles\": list[ValContentV1ContentsAsset],\n \"acts\": list[ValContentV1ContentsAct],\n \"ceremonies\": list[ValContentV1ContentsAsset]\n })\n \n ValRankedV1LeaderboardTierDetail = TypedDict(\"ValRankedV1LeaderboardTierDetail\", {\n \"rankedRatingThreshold\": int,\n \"startingPage\": int,\n \"startingIndex\": int\n })\n ValRankedV1LeaderboardPlayer = TypedDict(\"ValRankedV1LeaderboardPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"leaderboardRank\": int,\n \"rankedRating\": int,\n \"numberOfWins\": 
int,\n \"competitiveTier\": int\n })\n ValRankedV1LeaderboardTierDetails = TypedDict(\"ValRankedV1LeaderboardTierDetails\", {\n \"24\": ValRankedV1LeaderboardTierDetail,\n \"25\": ValRankedV1LeaderboardTierDetail,\n \"26\": ValRankedV1LeaderboardTierDetail,\n \"27\": ValRankedV1LeaderboardTierDetail\n })\n ValRankedV1Leaderboard = TypedDict(\"ValRankedV1Leaderboard\", {\n \"actId\": str,\n \"players\": list[ValRankedV1LeaderboardPlayer],\n \"totalPlayers\": int,\n \"immortalStartingPage\": int,\n \"immortalStartingIndex\": int,\n \"topTierRRThreshold\": int,\n \"tierDetails\": ValRankedV1LeaderboardTierDetails,\n \"startIndex\": int,\n \"query\": str,\n \"shard\": str\n })\n ValMatchV1MatchLocation = TypedDict(\"ValMatchV1MatchLocation\", {\n \"x\": float,\n \"y\": float\n })\n ValMatchV1MatchPlayerLocation = TypedDict(\"ValMatchV1MatchPlayerLocation\", {\n \"puuid\": str,\n \"viewRadians\": float,\n \"location\": ValMatchV1MatchLocation\n })\n ValMatchV1MatchInfo = TypedDict(\"ValMatchV1MatchInfo\", {\n \"matchId\": str,\n \"mapId\": str,\n \"gameVersion\": str,\n \"gameLengthMillis\": int,\n \"region\": str,\n \"gameStartMillis\": int,\n \"provisioningFlowId\": str,\n \"isCompleted\": bool,\n \"customGameName\": str,\n \"queueId\": str,\n \"gameMode\": str,\n \"isRanked\": bool,\n \"premierMatchInfo\": dict,\n \"seasonId\": str\n })\n ValMatchV1MatchPlayerStatsAbilityCasts = TypedDict(\"ValMatchV1MatchPlayerStatsAbilityCasts\", {\n \"grenadeCasts\": int,\n \"ability1Casts\": int,\n \"ability2Casts\": int,\n \"ultimateCasts\": int\n })\n ValMatchV1MatchPlayerStats = TypedDict(\"ValMatchV1MatchPlayerStats\", {\n \"score\": int,\n \"roundsPlayed\": int,\n \"kills\": int,\n \"deaths\": int,\n \"assists\": int,\n \"playtimeMillis\": int,\n \"abilityCasts\": ValMatchV1MatchPlayerStatsAbilityCasts | None\n })\n ValMatchV1MatchPlayer = TypedDict(\"ValMatchV1MatchPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"teamId\": str,\n \"partyId\": str,\n \"characterId\": str,\n \"stats\": ValMatchV1MatchPlayerStats,\n \"competitiveTier\": int,\n \"isObserver\": bool,\n \"playerCard\": str,\n \"playerTitle\": str,\n \"accountLevel\": int\n })\n ValMatchV1MatchTeam = TypedDict(\"ValMatchV1MatchTeam\", {\n \"teamId\": str,\n \"won\": bool,\n \"roundsPlayed\": int,\n \"roundsWon\": int,\n \"numPoints\": int\n })\n ValMatchV1MatchRoundResultPlayerStatKill = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatKill\", {\n \"timeSinceGameStartMillis\": int,\n \"timeSinceRoundStartMillis\": int,\n \"killer\": str,\n \"victim\": str,\n \"victimLocation\": ValMatchV1MatchLocation,\n \"assistants\": list[str],\n \"playerLocations\": list[ValMatchV1MatchPlayerLocation],\n \"finishingDamage\": TypedDict(\"FinishingDamage\", {\n \"damageType\": str,\n \"damageItem\": str,\n \"isSecondaryFireMode\": bool\n })\n })\n ValMatchV1MatchRoundResultPlayerStatDamage = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatDamage\", {\n \"receiver\": str,\n \"damage\": int,\n \"legshots\": int,\n \"bodyshots\": int,\n \"headshots\": int\n })\n ValMatchV1MatchRoundResultPlayerStatEconomy = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatEconomy\", {\n \"loadoutValue\": int,\n \"weapon\": str,\n \"armor\": str,\n \"remaining\": int,\n \"spent\": int\n })\n ValMatchV1MatchRoundResultPlayerStatAbility = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatAbility\", {\n \"grenadeEffects\": str | None,\n \"ability1Effects\": str | None,\n \"ability2Effects\": str | None,\n \"ultimateEffects\": str | None\n })\n 
ValMatchV1MatchRoundResultPlayerStat = TypedDict(\"ValMatchV1MatchRoundResultPlayerStat\", {\n \"puuid\": str,\n \"kills\": list[ValMatchV1MatchRoundResultPlayerStatKill],\n \"damage\": list[ValMatchV1MatchRoundResultPlayerStatDamage],\n \"score\": int,\n \"economy\": ValMatchV1MatchRoundResultPlayerStatEconomy,\n \"ability\": ValMatchV1MatchRoundResultPlayerStatAbility\n })\n ValMatchV1MatchRoundResult = TypedDict(\"ValMatchV1MatchRoundResult\", {\n \"roundNum\": int,\n \"roundResult\": str,\n \"roundCeremony\": str,\n \"winningTeam\": str,\n \"bombPlanter\": str | None,\n \"bombDefuser\": str | None,\n \"plantRoundTime\": int,\n \"plantPlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"plantLocation\": ValMatchV1MatchLocation,\n \"plantSite\": str,\n \"defuseRoundTime\": int,\n \"defusePlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"defuseLocation\": ValMatchV1MatchLocation,\n \"playerStats\": list[ValMatchV1MatchRoundResultPlayerStat],\n \"roundResultCode\": str\n })\n ValMatchV1Match = TypedDict(\"ValMatchV1Match\", {\n \"matchInfo\": ValMatchV1MatchInfo,\n \"players\": list[ValMatchV1MatchPlayer],\n \"coaches\": list[str],\n \"teams\": list[ValMatchV1MatchTeam],\n \"roundResults\": list[ValMatchV1MatchRoundResult]\n })\n ValMatchV1MatchlistHistory = TypedDict(\"ValMatchV1MatchlistHistory\", {\n \"matchId\": str,\n \"gameStartTimeMillis\": int,\n \"queueId\": str\n })\n ValMatchV1Matchlist = TypedDict(\"ValMatchV1Matchlist\", {\n \"puuid\": str,\n \"history\": list[ValMatchV1MatchlistHistory]\n })\n ValMatchV1RecentMatches = TypedDict(\"ValMatchV1RecentMatches\", {\n \"currentTime\": int,\n \"matchIds\": list[str]\n })\n\n # Status Types\n\n StatusV1PlatformDataLocaleContent = TypedDict(\"StatusV1PlatformDataLocaleContent\", {\n \"locale\": str,\n \"content\": str\n })\n StatusV1PlatformDataEntryUpdate = TypedDict(\"StatusV1PlatformDataEntryUpdate\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str,\n \"publish\": bool,\n \"author\": str,\n \"translations\": list[StatusV1PlatformDataLocaleContent],\n \"publish_locations\": list[str]\n })\n StatusV1PlatformDataEntry = TypedDict(\"StatusV1PlatformDataEntry\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str | None,\n \"archive_at\": str | None,\n \"titles\": list[StatusV1PlatformDataLocaleContent],\n \"updates\": list[StatusV1PlatformDataEntryUpdate],\n \"platforms\": list[str],\n \"maintenance_status\": str | None,\n \"incident_severity\": str | None\n })\n StatusV1PlatformData = TypedDict(\"StatusV4PlatformData\", {\n \"id\": str,\n \"name\": str,\n \"locales\": list[str],\n \"maintenances\": list[StatusV1PlatformDataEntry],\n \"incidents\": list[StatusV1PlatformDataEntry]\n })" }, { "identifier": "TaskGroup", "path": "pulsefire/taskgroups.py", "snippet": "class TaskGroup(asyncio.TaskGroup):\n \"\"\"Asynchronous context manager for managing groups of tasks.\n See [python asyncio task groups documentation](https://docs.python.org/3/library/asyncio-task.html#task-groups).\n\n Adapted for pulsefire, key differences from `asyncio.TaskGroup`:\n\n - Accepts a semaphore to restrict the amount of concurrent running coroutines.\n - Due to semaphore support, the `create_task` method is now async.\n - Allows internal collection of results and exceptions, similar to `asyncio.Task`.\n - If exception collection is on (default), the task group will not abort on task exceptions.\n\n Example:\n ```python\n async with TaskGroup(asyncio.Semaphore(100)) as tg:\n await 
tg.create_task(coro_func(...))\n results = tg.results()\n ```\n \"\"\"\n\n semaphore: asyncio.Semaphore | None = None\n \"\"\"Semaphore for restricting concurrent running coroutines.\"\"\"\n collect_results: bool = True\n \"\"\"Flag for collecting task results.\"\"\"\n collect_exceptions: bool = True\n \"\"\"Flag for collecting task exceptions, disables abort.\"\"\"\n\n def __init__(\n self,\n semaphore: asyncio.Semaphore | None = None,\n *,\n collect_results: bool = True,\n collect_exceptions: bool = True,\n ) -> None:\n super().__init__()\n self.semaphore = semaphore\n self.collect_results = collect_results\n self.collect_exceptions = collect_exceptions\n self._exceptions: list[BaseException] = []\n self._results = []\n\n async def __aenter__(self):\n self._exceptions = []\n self._results = []\n return await super().__aenter__()\n\n def results[T](self) -> list[T]:\n \"\"\"Return the collected results returned from created tasks.\"\"\"\n if not self.collect_results:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_results` off\")\n return self._results\n\n def exceptions(self) -> list[BaseException]:\n \"\"\"Return the collected exceptions raised from created tasks.\"\"\"\n if not self.collect_exceptions:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_exceptions` off\")\n return self._exceptions\n\n @override\n async def create_task[T](self, coro: Awaitable[T], *, name: str | None = None, context: Context | None = None) -> asyncio.Task[T]:\n \"\"\"Create a new task in this group and return it.\n\n If this group has a semaphore, wrap this semaphore on the coroutine.\n \"\"\"\n _coro = coro\n if self.semaphore:\n await self.semaphore.acquire()\n async def semaphored():\n try:\n return await _coro\n finally:\n self.semaphore.release()\n coro = semaphored()\n return super().create_task(coro, name=name, context=context)\n\n def _on_task_done(self, task) -> None:\n if exc := task.exception():\n if self.collect_exceptions:\n LOGGER.warning(\n \"TaskGroup: unhandled exception\\n\" +\n \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n )\n self._exceptions.append(exc)\n self._tasks.discard(task)\n if self._on_completed_fut is not None and not self._tasks:\n if not self._on_completed_fut.done():\n self._on_completed_fut.set_result(True)\n return\n elif self.collect_results and not task.cancelled():\n self._results.append(task.result())\n return super()._on_task_done(task)" } ]
import asyncio
import os
from pulsefire.clients import RiotAPIClient
from pulsefire.functools import async_to_sync
from pulsefire.schemas import RiotAPISchema
from pulsefire.taskgroups import TaskGroup
16,648
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
        async with TaskGroup() as tg:
            for match_id in match_ids[:20]:
                await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
        async with TaskGroup() as tg:
            for match_id in match_ids[:20]:
                await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
matches: list[RiotAPISchema.LolMatchV5Match] = tg.results()
2
2023-11-27 13:37:24+00:00
24k
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_tess_pv_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Optimal Load Flow with Steady State Security\"},\n IEEE Transactions on Power Apparatus and Systems, Vol. PAS 93, No. 3,\n 1974, pp. 745-751.\n\n ... with branch parameters rounded to nearest 0.01, shunt values divided\n by 100 and shunt on bus 10 moved to bus 5, load at bus 5 zeroed out.\n Generator locations, costs and limits and bus areas were taken from ...\n\n Ferrero, R.W., Shahidehpour, S.M., Ramesh, V.C., I{\"Transaction analysis\n in deregulated power systems using game theory\"}, IEEE Transactions on\n Power Systems, Vol. 12, No. 3, Aug 1997, pp. 1340-1347.\n\n Generator Q limits were derived from Alsac & Stott, using their Pmax\n capacities. V limits and line |S| limits taken from Alsac & Stott.\n\n @return: Power flow data for 30 bus, 6 generator case.\n @see: U{http://www.pserc.cornell.edu/matpower/}\n \"\"\"\n ppc = {\"version\": '2'}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array([\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [2, 1, 0.1, 0.06, 0, 0, 1, 1, 0, 12.66, 1, 1.1, 0.95],\n [3, 1, 0.09, 0.04, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [4, 1, 0.12, 0.08, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [5, 1, 0.06, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [6, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [7, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [8, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [9, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [10, 1, 0.06, 0.02, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [11, 1, 0.045, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [12, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [13, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [14, 1, 0.12, 0.08, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [15, 1, 0.06, 0.01, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [16, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [17, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [18, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [19, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [20, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [21, 1, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [22, 2, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [23, 2, 0.09, 0.05, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [24, 1, 0.42, 0.20, 0, 0.04, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [25, 1, 0.42, 0.2, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [26, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [27, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [28, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [29, 1, 0.12, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [30, 1, 0.2, 0.6, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [31, 1, 0.15, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [32, 1, 0.21, 0.1, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [33, 1, 0.06, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n ])\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf, start-up time, shut-down time and initial condition!\n ppc[\"gen\"] = array([\n [1, 
23.54, 0, 150, -20, 1, 100, 1, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1],\n ])\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array([\n [1, 2, 0.057525912, 0.029324489, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [2, 3, 0.307595167, 0.15666764, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [3, 4, 0.228356656, 0.116299674, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [4, 5, 0.237777928, 0.121103899, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [5, 6, 0.510994811, 0.441115179, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [6, 7, 0.116798814, 0.386084969, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [7, 8, 0.44386045, 0.146684835, 0, 90, 90, 90, 0, 0, 1, -360, 360],\n [8, 9, 0.642643047, 0.461704714, 0, 70, 70, 70, 0, 0, 1, -360, 360],\n [9, 10, 0.651378001, 0.461704714, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [10, 11, 0.122663712, 0.040555144, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [11, 12, 0.233597628, 0.077241951, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [12, 13, 0.915922324, 0.720633708, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [13, 14, 0.337917936, 0.444796338, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [14, 15, 0.368739846, 0.328184702, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [15, 16, 0.465635443, 0.340039282, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [16, 17, 0.804239697, 1.073775422, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [17, 18, 0.456713311, 0.358133116, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [2, 19, 0.102323747, 0.097644308, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [19, 20, 0.938508419, 0.845668336, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [20, 21, 0.255497406, 0.298485858, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [21, 22, 0.442300637, 0.584805173, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [3, 23, 0.28151509, 0.192356167, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [23, 24, 0.560284909, 0.442425422, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [24, 25, 0.559037059, 0.43743402, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [6, 26, 0.126656834, 0.064513875, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [26, 27, 0.177319567, 0.090281989, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [27, 28, 0.660736881, 0.582559042, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [28, 29, 0.501760717, 0.437122057, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [29, 30, 0.316642084, 0.161284687, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [30, 31, 0.607952801, 0.600840053, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [31, 32, 0.193728802, 0.225798562, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [32, 33, 0.212758523, 0.330805188, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [7, 20, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [8, 14, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [11, 21, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [17, 32, 0.3120, 0.3120, 0, 65, 65, 65, 0, 0, 0, -360, 360],\n [24, 28, 0.3120, 0.3120, 0, 16, 16, 16, 0, 0, 0, -360, 360]\n ])\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([\n [1, 8],\n [2, 23],\n [3, 26],\n ])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... 
c0\n ppc[\"gencost\"] = array([\n [0, 0, 0, 3, 0.0, 20, 0]\n ])\n\n return ppc" }, { "identifier": "micro_grid", "path": "TestCasesMicrogrids/test_cases/cases_unit_commitment.py", "snippet": "AC_PD = array([323.0284, 308.2374, 318.1886, 307.9809, 331.2170, 368.6539, 702.0040, 577.7045, 1180.4547, 1227.6240,\n 1282.9344, 1311.9738, 1268.9502, 1321.7436, 1323.9218, 1327.1464, 1386.9117, 1321.6387, 1132.0476,\n 1109.2701, 882.5698, 832.4520, 349.3568, 299.9920])\nDC_PD = array([287.7698, 287.7698, 287.7698, 287.7698, 299.9920, 349.3582, 774.4047, 664.0625, 1132.6996, 1107.7366,\n 1069.6837, 1068.9819, 1027.3295, 1096.3820, 1109.4778, 1110.7039, 1160.1270, 1078.7839, 852.2514,\n 791.5814, 575.4085, 551.1441, 349.3568, 299.992])\nDG = {\"PMIN\": 0,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST_A\": 0.01,\n \"COST_B\": 0.5}\nUG = {\"PMIN\": -5,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST\": Price_UG, } # The cost should be a profile\nESS = {\"PDC_MAX\": 5,\n \"PCH_MAX\": 5,\n \"EFF_DC\": 0.95,\n \"EFF_CH\": 0.95,\n \"E0\": 10,\n \"EMIN\": 5,\n \"EMAX\": 20, }\nBIC = {\"PMAX\": 5,\n \"QMAX\": 5,\n \"SMAX\": 5,\n \"EFF_AC2DC\": 0.9,\n \"EFF_DC2AC\": 0.9, }\nMG = {\"PMAX\": 5,\n \"PMIN\": -5,\n \"QMAX\": 5,\n \"QMIN\": -5\n }\nPD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5,\n \"DC\": DC_PD / max(DC_PD),\n \"DC_MAX\": 5}\nQD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5, }\nPV = {\"PMAX\": 0,\n \"COST\": 0}" }, { "identifier": "PBIC_AC2DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_AC2DC = 4" }, { "identifier": "PG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PG = 0" }, { "identifier": "PESS_DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_DC = 8" }, { "identifier": "PBIC_DC2AC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_DC2AC = 5" }, { "identifier": "PUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PUG = 2" }, { "identifier": "PESS_CH", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_CH = 7" }, { "identifier": "PMESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PMESS = 11 # Reactive power unit commitment of" }, { "identifier": "EESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "EESS = 9" }, { "identifier": "NX_MG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "NX_MG = 12" }, { "identifier": "QBIC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QBIC = 6" }, { "identifier": "QUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QUG = 3" }, { "identifier": "QG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QG = 1" }, { "identifier": "PPV", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PPV = 10" }, { "identifier": "DataBaseManagement", "path": "TestCaseDistributionSystems/database_management_pv.py", "snippet": "class DataBaseManagement():\n\n def __init__(self, host=\"localhost\", user=\"ems\", password=\"12345678\", db=\"mess_pv\"):\n \"\"\"\n Initialized the database connection string\n :param host: host ip\n :param user: user name\n :param password: password\n :param db: database name\n :return\n \"\"\"\n self.db = pymysql.connect(host=host, user=user, password=password, db=db)\n\n def create_table(self, table_name, nl=32, nb=33, ng=6, nmg=3):\n \"\"\"\n 
Creat table name\n :param table_name:\n :param nb:\n :param nb:\n :param ng:\n :return: no return value\n \"\"\"\n cursor = self.db.cursor()\n sql = \"DROP TABLE IF EXISTS \"\n cursor.execute(sql + table_name)\n if table_name == \"distribution_networks\":\n sql_start = \"\"\"CREATE TABLE distribution_networks (\"\"\"\n sql = 'SCENARIO INT,\\n TIME INT NOT NULL,\\n '\n for i in range(nl):\n sql += \"PIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"QIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"IIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nb):\n sql += \"V{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng - 1):\n sql += \"QG{0} DECIMAL(8,6),\\n \".format(i)\n sql += \"QG{0} DECIMAL(8,6)\\n \".format(ng - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"micro_grids\":\n sql_start = \"\"\"CREATE TABLE micro_grids (\"\"\"\n sql = 'SCENARIO INT,\\n MG INT,\\n TIME INT,\\n '\n sql += 'PG DECIMAL(8,4),\\n QG DECIMAL(8,4),\\n PUG DECIMAL(8,4),\\n QUG DECIMAL(8,4),\\n '\n sql += 'PBIC_AC2DC DECIMAL(8,4),\\n PBIC_DC2AC DECIMAL(8,4),\\n QBIC DECIMAL(8,4),\\n PESS_CH DECIMAL(7,4),\\n '\n sql += 'PESS_DC DECIMAL(8,4),\\n EESS DECIMAL(8,4),\\n PPV DECIMAL(8,4),\\n PMESS DECIMAL(8,4)'\n sql_end = \"\"\")\"\"\"\n elif table_name == \"mobile_energy_storage_systems\":\n sql_start = \"\"\"CREATE TABLE mobile_energy_storage_systems (\"\"\"\n sql = 'SCENARIO INT,\\n MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"EESS DECIMAL(8,4)\\n \"\n sql_end = \"\"\")\"\"\"\n elif table_name == \"first_stage_solutions\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE first_stage_solutions (\"\"\"\n sql = 'TIME INT,\\n'\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"IESS{0} INT,\\n \".format(i)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"ESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"IESS{0} INT,\\n \".format(nmg - 1)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"ESS{0} DECIMAL(8,4)\\n \".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"fisrt_stage_mess\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE fisrt_stage_mess (\"\"\"\n sql = 'MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"IDC_MG{0} INT,\\n \".format(i)\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"RMESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"MESS_F_STOP INT,\\n \"\n sql += \"MESS_T_STOP INT\\n \"\n sql_end = \"\"\")\"\"\"\n else:\n sql_start = \"\"\"CREATE TABLE scenarios (\"\"\"\n sql = 'SCENARIO INT,\\n WEIGHT DECIMAL(8,4),\\n TIME INT,\\n'\n for i in range(nb):\n sql += \"PD{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += 
\"PD_AC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PD_DC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PPV{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PPV{0} DECIMAL(8,4)\\n\".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n\n cursor.execute(sql_start + sql + sql_end)\n cursor.close()\n\n def insert_data_ds(self, table_name, nl=32, nb=33, ng=6, scenario=0, time=0, pij=0, qij=0, lij=0, vi=0, pg=0, qg=0):\n \"\"\"\n Insert data into table_name\n :param table_name:\n :param nl:\n :param nb:\n :param ng:\n :param pij:\n :param qij:\n :param lij:\n :param vi:\n :param pg:\n :param qg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,TIME,\"\n value = \"{0},{1},\".format(scenario, time)\n for i in range(nl):\n sql += \"PIJ{0},\".format(i)\n value += \"{0},\".format(pij[i])\n for i in range(nl):\n sql += \"QIJ{0},\".format(i)\n value += \"{0},\".format(qij[i])\n for i in range(nl):\n sql += \"IIJ{0},\".format(i)\n value += \"{0},\".format(lij[i])\n for i in range(nb):\n sql += \"V{0},\".format(i)\n value += \"{0},\".format(vi[i])\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n for i in range(ng - 1):\n sql += \"QG{0},\".format(i)\n value += \"{0},\".format(qg[i])\n sql += \"QG{0}\".format(ng - 1)\n value += \"{0}\".format(qg[ng - 1])\n\n sql += \") VALUES (\" + value + \")\"\n\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mg(self, table_name, scenario=0, time=0, mg=0, pg=0, qg=0, pug=0, qug=0, pbic_ac2dc=0, pbic_dc2ac=0,\n qbic=0, pess_ch=0, pess_dc=0, eess=0, pmess=0, ppv=0):\n \"\"\"\n insert microgrid data\n :param table_name:\n :param scenario:\n :param time:\n :param mg:\n :param pg:\n :param qg:\n :param pug:\n :param qug:\n :param pbic_ac2dc:\n :param pbic_dc2ac:\n :param qbic:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param pmess:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MG,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mg, time)\n sql += \"PG,QG,PUG,QUG,PBIC_AC2DC,PBIC_DC2AC,QBIC,PESS_CH,PESS_DC,EESS,PPV,PMESS\"\n value += \"{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11}\".format(pg, qg, pug, qug, pbic_ac2dc, pbic_dc2ac,\n qbic, pess_ch, pess_dc, eess, ppv, pmess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage_mess(self, table_name, time=0, mess=0, imess=[0, 0, 0], pmess_ch=[0, 0, 0],\n pmess_dc=[0, 0, 0], rmess=[0, 0, 0], mess_f_stop=0, mess_t_stop=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data in the first-stage\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"MESS,TIME,\"\n value = \"{0},{1},\".format(mess, time)\n for i in range(nmg):\n sql += \"IDC_MG{0},\".format(i)\n value += \"{0},\".format(imess[i])\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n for i in range(nmg):\n sql += \"RMESS{0},\".format(i)\n value += \"{0},\".format(rmess[i])\n sql += \"MESS_F_STOP,MESS_T_STOP\"\n value += \"{0},{1}\".format(mess_f_stop, mess_t_stop)\n sql += 
\") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mess(self, table_name, scenario=0, time=0, mess=0, pmess_ch=[0, 0, 0], pmess_dc=[0, 0, 0],\n emess=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MESS,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mess, time)\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n sql += \"EESS\"\n value += \"{0}\".format(emess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage(self, table_name, time=0, ng=2, nmg=2, pg=[0, 0], rg=[0, 0], pg_mg=[0, 0],\n rg_mg=[0, 0], iess=[0, 0], pess_dc=[0, 0], pess_ch=[0, 0], ress=[0, 0], ess=[0, 0]):\n \"\"\"\n insert scenario data\n :param table_name:\n :param scenario:\n :param weight:\n :param time:\n :param nb:\n :param nmg:\n :param pd:\n :param pd_ac:\n :param pd_dc:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"TIME,\"\n value = \"{0},\".format(time)\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n sql += \"RG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n value += \"{0},\".format(rg[i])\n if nmg > 1:\n for i in range(nmg - 1):\n sql += \"PG_MG{0},\".format(i)\n sql += \"RG_MG{0},\".format(i)\n sql += \"IESS{0},\".format(i)\n sql += \"PESS_DC{0},\".format(i)\n sql += \"PESS_CH{0},\".format(i)\n sql += \"RESS{0},\".format(i)\n sql += \"ESS{0},\".format(i)\n value += \"{0},\".format(pg_mg[i])\n value += \"{0},\".format(rg_mg[i])\n value += \"{0},\".format(iess[i])\n value += \"{0},\".format(pess_dc[i])\n value += \"{0},\".format(pess_ch[i])\n value += \"{0},\".format(ress[i])\n value += \"{0},\".format(ess[i])\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg[nmg - 1])\n value += \"{0},\".format(rg_mg[nmg - 1])\n value += \"{0},\".format(iess[nmg - 1])\n value += \"{0},\".format(pess_dc[nmg - 1])\n value += \"{0},\".format(pess_ch[nmg - 1])\n value += \"{0},\".format(ress[nmg - 1])\n value += \"{0}\".format(ess[nmg - 1])\n else:\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg)\n value += \"{0},\".format(rg_mg)\n value += \"{0},\".format(iess)\n value += \"{0},\".format(pess_dc)\n value += \"{0},\".format(pess_ch)\n value += \"{0},\".format(ress)\n value += \"{0}\".format(ess)\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_scenario(self, table_name, scenario=0, weight=0, time=0, nb=1, nmg=2, pd=[0, 0], pd_ac=[0, 0],\n pd_dc=[0, 0], ppv=[0, 0]):\n cursor = self.db.cursor()\n sql_start = 
\"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,WEIGHT,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, weight, time)\n for i in range(nb):\n sql += \"PD{0},\".format(i)\n value += \"{0},\".format(pd[i])\n for i in range(nmg):\n sql += \"PD_AC{0},\".format(i)\n value += \"{0},\".format(pd_ac[i])\n for i in range(nmg):\n sql += \"PD_DC{0},\".format(i)\n value += \"{0},\".format(pd_dc[i])\n for i in range(nmg - 1):\n sql += \"PPV{0},\".format(i)\n value += \"{0},\".format(ppv[i])\n if nmg > 1:\n sql += \"PPV{0}\".format(nmg - 1)\n value += \"{0}\".format(ppv[nmg - 1])\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def inquery_data_scenario(self, table_name, scenario=0, time=0):\n cursor = self.db.cursor()\n # sql = \"SELECT * FROM \" + table_name + \" ;\"\n sql = \"SELECT * FROM \" + table_name + \" WHERE SCENARIO={0} AND TIME={1};\".format(scenario, time)\n cursor.execute(sql)\n data = cursor.fetchall()\n n_data = len(data[0])\n\n temp = []\n for i in range(n_data): temp.append(float(data[0][i]))\n\n cursor.close()\n return temp" }, { "identifier": "ScenarioReduction", "path": "StochasticOptimization/scenario_reduction.py", "snippet": "class ScenarioReduction():\n def __init__(self):\n self.name = \"Scenario reduction\"\n\n def run(self, scenario, weight, n_reduced, power):\n \"\"\"\n\n :param scenario: A fan scenario tree, when more stage are considered, some merge operation can be implemented\n :param weight: Weight of each scenario\n :param n_reduced: Number of scenarios needs to be reduced\n :param power: The power in the distance calculation\n :return:\n \"\"\"\n n_scenario = scenario.shape[0] # number of original scenarios\n c = zeros((n_scenario, n_scenario))\n # Calculate the c matrix\n for i in range(n_scenario):\n for j in range(n_scenario):\n c[i, j] = linalg.norm((scenario[i, :] - scenario[j, :]), 2)\n c[i, j] = max([1, linalg.norm(scenario[i, :], power - 1), linalg.norm(scenario[j, :], power - 1)]) * \\\n c[i, j]\n\n J = arange(n_scenario) # The original index range\n J_reduced = array([])\n # Implement the iteration\n for n in range(n_reduced): # find the minimal distance\n print(\"The reduction is in process {0}\".format(n))\n c_n = inf * ones(n_scenario)\n c_n[J] = 0\n for u in J:\n # Delete the i-th distance\n J_temp = delete(J, where(J == u))\n for k in J_temp:\n c_k_j = delete(c[int(k)], J_temp)\n c_n[int(u)] += weight[int(k)] * min(c_k_j)\n u_i = argmin(c_n)\n J_reduced = append(J_reduced, u_i)\n J = delete(J, where(J == u_i))\n # Optimal redistribution\n p_s = weight.copy()\n p_s[J_reduced.astype(int)] = 0\n\n for i in J_reduced:\n c_temp = c[int(i), :]\n c_temp[J_reduced.astype(int)] = inf\n index = argmin(c_temp)\n p_s[index] += weight[int(i)]\n\n scenario_reduced = scenario[J.astype(int), :]\n weight_reduced = p_s[J.astype(int)]\n\n return scenario_reduced, weight_reduced" } ]
from TestCaseDistributionSystems.test_cases import case33
from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid
from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION
from numpy import zeros, shape, ones, diag, concatenate, eye
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, lil_matrix
from numpy import flatnonzero as find
from numpy import array, tile, arange, random
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A
from pypower.idx_bus import PD, VMAX, VMIN, QD
from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp
from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp
from copy import deepcopy
from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \
    PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV
from TestCaseDistributionSystems.database_management_pv import DataBaseManagement
from StochasticOptimization.scenario_reduction import ScenarioReduction
15,466
for t in range(T): Aeq[t, n_stops * 2 + t] = 1 Aeq[t, n_stops + nb_tra_ele * t:n_stops + nb_tra_ele * (t + 1)] = -mess["EFF_CH"] Aeq[t, nb_tra_ele * t:nb_tra_ele * (t + 1)] = 1 / mess["EFF_DC"] if t == 0: beq[t] = mess["E0"] else: Aeq[t, n_stops * 2 + t - 1] = -1 c = concatenate((ones(n_stops * 2) * mess["COST_OP"], zeros(T))) # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv), "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "NX": nv, } return model_tess def scenario_generation_reduction(self, micro_grids, profile, pns, pv_profile, update=0, ns=2, ns_reduced=2, std=0.03, interval=0.05, std_pv=0.05): """ Scenario generation function for the second-stage scheduling Stochastic variables include 1) loads in distribution networks, active loads for 2) AC bus and 3)DC bus. The assumption is that, the 1) loads in distribution networks follow normal distribution nb*T 2) loads for AC bus and DC bus follow uniform distribution nmg*T*4 :return: """ T = self.T nmg = self.nmg nb = self.nb db_management = DataBaseManagement() if update > 0: # 1) scenario generation bus_load = zeros((ns, nb * T)) mg_load = zeros((ns, nmg * T * 2)) mg_pv = zeros((ns, nmg * T)) weight = ones(ns) / ns for i in range(ns): for t in range(T): for j in range(nb): bus_load[i, t * nb + j] = pns["bus"][j, PD] * (1 + random.normal(0, std)) * profile[t] pv_rand = random.normal(0, std_pv) # all PV are correlated! for j in range(nmg): mg_load[i, t * nmg + j] = micro_grids[j]["PD"]["AC"][t] * \ (1 + random.uniform(-interval, interval)) mg_load[i, nmg * T + t * nmg + j] = micro_grids[j]["PD"]["DC"][t] * \ (1 + random.uniform(-interval, interval)) mg_pv[i, t * nmg + j] = micro_grids[j]["PV"]["PMAX"] * pv_profile[t] * \ (1 + pv_rand) # 2) scenario reduction scenario_reduction = ScenarioReduction() (scenario_reduced, weight_reduced) = \ scenario_reduction.run(scenario=concatenate([bus_load, mg_load, mg_pv], axis=1), weight=weight, n_reduced=ns_reduced, power=2) # 3) Store the data into database db_management.create_table("scenarios", nb=nb, nmg=nmg) for i in range(ns - ns_reduced): for t in range(T): # print(scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist()) db_management.insert_data_scenario("scenarios", scenario=i, weight=weight_reduced[i], time=t, nb=nb, pd=scenario_reduced[i, t * nb:(t + 1) * nb].tolist(), nmg=nmg, pd_ac=scenario_reduced[i, nb * T + t * nmg: nb * T + (t + 1) * nmg].tolist(), pd_dc=scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist(), ppv=scenario_reduced[i, nb * T + nmg * T * 2 + t * nmg: nb * T + nmg * T * 2 + (t + 1) * nmg].tolist()) # print(t) else: # 4) if not updated, inquery the database scenario_reduced = zeros((ns - ns_reduced, nb * T + nmg * T * 3)) weight_reduced = zeros(ns - ns_reduced) for i in range(ns - ns_reduced): for t in range(T): data = db_management.inquery_data_scenario(table_name="scenarios", scenario=i, time=t) weight_reduced[i] = data[1] scenario_reduced[i, nb * t:nb * (t + 1)] = array(data[3:nb + 3]) scenario_reduced[i, nb * T + nmg * t:nb * T + nmg * (t + 1)] = array(data[nb + 3:nb + 3 + nmg]) scenario_reduced[i, nb * T + nmg * T + nmg * t:nb * T + nmg * T + nmg * (t + 1)] = \ array(data[nb + 3 + nmg:nb + 3 + nmg * 2]) scenario_reduced[i, nb * T + nmg * T * 2 + nmg * t:nb * T + nmg * T * 2 + nmg * (t + 1)] = \ array(data[nb + 3 + nmg * 2:nb + 3 + nmg * 3]) # assert sum(weight_reduced) == 1, "The weight factor is not right!" 
        # 4) return value
        ds_load_profile = scenario_reduced[:, 0:nb * T]
        mgs_load_profile = scenario_reduced[:, nb * T:nb * T + nmg * T * 2]
        pv_load_profile = scenario_reduced[:, nb * T + nmg * T * 2:]
        # profile_second_stage = zeros((ns, T))
        microgrids_second_stage = [0] * (ns - ns_reduced)
        # for i in range(ns):
        #     for j in range(T):
        #         profile_second_stage[i, j] = profile[j] * (1 + 0.5 * random.random())
        #
        for i in range(ns - ns_reduced):
            microgrids_second_stage[i] = deepcopy(micro_grids)
            for j in range(nmg):
                microgrids_second_stage[i][j]["PV"]["PROFILE"] = zeros(T)
                for t in range(T):
                    microgrids_second_stage[i][j]["PD"]["AC"][t] = mgs_load_profile[i, t * nmg + j]
                    microgrids_second_stage[i][j]["QD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] * 0.2
                    microgrids_second_stage[i][j]["PD"]["DC"][t] = mgs_load_profile[i, T * nmg + t * nmg + j]
                    microgrids_second_stage[i][j]["PV"]["PROFILE"][t] = pv_load_profile[i, t * nmg + j]

        return ds_load_profile, microgrids_second_stage, weight_reduced


if __name__ == "__main__":
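The scenario_generation_reduction routine above draws perturbed load/PV scenarios and then calls a ScenarioReduction class to trim them to a small weighted set. Below is a minimal, self-contained sketch of that idea using plain NumPy; the function names generate_load_scenarios and forward_selection are illustrative only and are not part of the repository, which additionally perturbs microgrid AC/DC loads and PV with uniform noise.

import numpy as np

def generate_load_scenarios(base_load, profile, ns=100, std=0.03, seed=0):
    """base_load: (nb,) peak bus loads; profile: (T,) normalized load profile."""
    rng = np.random.default_rng(seed)
    nb, T = base_load.size, profile.size
    scenarios = np.empty((ns, nb * T))
    for t in range(T):
        # multiplicative normal noise around the deterministic profile
        noise = 1.0 + rng.normal(0.0, std, size=(ns, nb))
        scenarios[:, t * nb:(t + 1) * nb] = base_load * profile[t] * noise
    weights = np.full(ns, 1.0 / ns)
    return scenarios, weights

def forward_selection(scenarios, weights, n_keep):
    """Greedy fast-forward selection; returns kept scenarios and updated weights."""
    ns = scenarios.shape[0]
    dist = np.linalg.norm(scenarios[:, None, :] - scenarios[None, :, :], axis=2)
    selected, remaining = [], list(range(ns))
    for _ in range(n_keep):
        best, best_cost = None, np.inf
        for cand in remaining:
            trial = selected + [cand]
            # probability-weighted distance of every remaining scenario to the trial set
            cost = float(np.sum(weights[remaining] * dist[np.ix_(remaining, trial)].min(axis=1)))
            if cost < best_cost:
                best, best_cost = cand, cost
        selected.append(best)
        remaining.remove(best)
    # fold the weight of each dropped scenario into its nearest kept neighbour
    new_weights = weights[selected].copy()
    for j in remaining:
        new_weights[int(np.argmin(dist[j, selected]))] += weights[j]
    return scenarios[selected], new_weights

Fast-forward selection keeps the subset that best preserves the probability-weighted distance structure, and because the weights of dropped scenarios are redistributed to their nearest kept neighbour, the reduced weights still sum to one, which is what the commented-out assertion above is checking.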
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: [email protected] @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formulation(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t], ppv=sol_second_stage_checked[i]["MG"]["ppv"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formulation(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary 
for DGs within distribution networks rg_l = zeros(ng) alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T 
* ng, nv_first_stage)) beq = zeros(T * ng) for i in range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[t * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[t * nmg + j, (t - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] 
nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): 
ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = 2 * gen[:, PMAX] / baseMVA qg_u = 2 * gen[:, QMAX] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l]), T)]) ub = 
concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg))]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] 
# Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + 
PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [1, 1, 1, 1, 1]] Rc_temp[i * T + t] = mgs[i]["BIC"]["SMAX"] ** 2 Rc = concatenate([Rc, Rc_temp]) ## IV. Coupling constraints between the first stage and second stage decision variables # pg, pg_mg, pess_mg, pess_tess # Ts*x+Ws*ys<=hs ## IV) Formulate the coupling constraints between the first-stage and second-stage problems # 1) -Pg -Rg + pg <= 0 _nv_first_stage = self._nv_first_stage Ts = lil_matrix((ng * T, nv_first_stage)) Ws = lil_matrix((ng * T, nv_second_stage)) hs = zeros(ng * T) for i in range(T): for j in range(ng): Ts[i * ng + j, i * _nv_first_stage + ng * 3 + j] = -1 Ts[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = 1 # 2) Pg-Rg - pg <= 0 Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 3 + j] = 1 Ts_temp[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) Qg <= IgQg_max Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = -qg_u[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Qg >= IgQg_min Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = qg_l[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) -Pg_mg - Rg_mg + pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = -1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Pg_mg - Rg_mg - pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = 1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 5) pess_dc - 
pess_ch <= Pess_dc - Pess_ch + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = -1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_DC] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 6) pess_ch - pess_dc <= Pess_ch - Pess_dc + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = 1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = 1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_DC] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 7) ptss_ch - ptss_dc <= Ptss_ch - Ptss_dc + Rtss nv_tra = self.nv_tra nl_tra = self.nl_tra Ts_temp = lil_matrix((nmg * T * nmes, nv_first_stage)) Ws_temp = lil_matrix((nmg * T * nmes, nv_second_stage)) hs_temp = zeros(nmg * T * nmes) for i in range(nmes): Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T:_nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2] = eye( nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3] = -eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 4] = -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, nv_index_ev[i] + nmg * T * 0:nv_index_ev[i] + nmg * T * 1] = \ -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, nv_index_ev[i] + nmg * T * 1:nv_index_ev[i] + nmg * T * 2] = \ eye(nmg * T) Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 8) ptss_dc - ptss_ch <= Ptss_dc - Ptss_ch + Rtss Ts_temp = lil_matrix((nmg * T * nmes, nv_first_stage)) Ws_temp = lil_matrix((nmg * T * nmes, nv_second_stage)) hs_temp = zeros(nmg * T * nmes) for i in range(nmes): Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2] = \ -eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3] = \ eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 4] = \ -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, int(nv_index_ev[i]) + nmg * T * 0: int(nv_index_ev[i]) + nmg * T * 1] = eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, int(nv_index_ev[i]) + nmg * T * 1: int(nv_index_ev[i]) + nmg * T * 2] = -eye(nmg * T) Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # sol = miqcp(c, q, Aeq=Aeq, beq=beq, A=None, b=None, Qc=Qc, xmin=lx, xmax=ux) model_second_stage = 
{"c": c * weight, "q": q * weight, "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "Qc": Qc, "rc": Rc, "c0": c0, "Ts": Ts, "Ws": Ws, "hs": hs} return model_second_stage def second_stage_solution_validation(self, sol): """ :param sol: The second stage solution under specific scenario :return: for each value """ T = self.T nb = self.nb ng = self.ng nl = self.nl nmg = self.nmg nmes = self.nmes f = self.f # Solutions for distribution networks ds_sol = {} _nv_second_stage = self._nv_second_stage ds_sol["pij"] = zeros((nl, T)) ds_sol["qij"] = zeros((nl, T)) ds_sol["lij"] = zeros((nl, T)) ds_sol["vi"] = zeros((nb, T)) ds_sol["pg"] = zeros((ng, T)) ds_sol["qg"] = zeros((ng, T)) ds_sol["pmg"] = zeros((nmg, T)) ds_sol["qmg"] = zeros((nmg, T)) ds_sol["gap"] = zeros((nl, T)) for i in range(T): ds_sol["pij"][:, i] = sol[_nv_second_stage * i:_nv_second_stage * i + nl] ds_sol["qij"][:, i] = sol[_nv_second_stage * i + nl:_nv_second_stage * i + nl * 2] ds_sol["lij"][:, i] = sol[_nv_second_stage * i + nl * 2:_nv_second_stage * i + nl * 3] ds_sol["vi"][:, i] = sol[_nv_second_stage * i + nl * 3:_nv_second_stage * i + nl * 3 + nb] ds_sol["pg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb:_nv_second_stage * i + nl * 3 + nb + ng] ds_sol["qg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng: _nv_second_stage * i + nl * 3 + nb + ng * 2] ds_sol["pmg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng * 2: _nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg] ds_sol["qmg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg: _nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg * 2] for j in range(nl): ds_sol["gap"][j, i] = ds_sol["pij"][j, i] ** 2 + ds_sol["qij"][j, i] ** 2 - \ ds_sol["lij"][j, i] * ds_sol["vi"][int(f[j]), i] # Solutions for the microgrids mg_sol = {} mg_sol["pg"] = zeros((nmg, T)) mg_sol["qg"] = zeros((nmg, T)) mg_sol["pug"] = zeros((nmg, T)) mg_sol["qug"] = zeros((nmg, T)) mg_sol["pbic_ac2dc"] = zeros((nmg, T)) mg_sol["pbic_dc2ac"] = zeros((nmg, T)) mg_sol["qbic"] = zeros((nmg, T)) mg_sol["pess_ch"] = zeros((nmg, T)) mg_sol["pess_dc"] = zeros((nmg, T)) mg_sol["eess"] = zeros((nmg, T)) mg_sol["pmess"] = zeros((nmg, T)) mg_sol["ppv"] = zeros((nmg, T)) for i in range(nmg): for t in range(T): mg_sol["pg"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PG] mg_sol["qg"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QG] mg_sol["pug"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PUG] mg_sol["qug"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QUG] mg_sol["pbic_ac2dc"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PBIC_AC2DC] mg_sol["pbic_dc2ac"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PBIC_DC2AC] mg_sol["qbic"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QBIC] mg_sol["pess_ch"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PESS_CH] mg_sol["pess_dc"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PESS_DC] mg_sol["eess"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + EESS] mg_sol["ppv"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PPV] mg_sol["pmess"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PMESS] mg_sol["gap"] = mg_sol["pbic_ac2dc"].__mul__(mg_sol["pbic_dc2ac"]) # Solutions for the mess n_stops = self.n_stops mess_sol = {} for i in range(nmes): mess_temp = {} mess_temp["pmess_dc"] = zeros((nmg, T)) mess_temp["pmess_ch"] = zeros((nmg, T)) 
mess_temp["emess"] = zeros((1, T)) for t in range(T): mess_temp["pmess_dc"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + nmg * t: _nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + nmg * (t + 1)] mess_temp["pmess_ch"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops + nmg * t: _nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops + nmg * (t + 1)] mess_temp["emess"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops * 2 + t] mess_sol[i] = mess_temp second_stage_solution = {} second_stage_solution["DS"] = ds_sol second_stage_solution["MG"] = mg_sol second_stage_solution["MESS"] = mess_sol return second_stage_solution def problem_formulation_microgrid(self, mg, mess): """ Unit commitment problem formulation of single micro_grid :param micro_grid: :return: """ try: T = self.T except: T = 24 nmes = self.nmes pmess_l = 0 pmess_u = 0 for i in range(nmes): pmess_l -= mess[i]["PCMAX"] pmess_u += mess[i]["PDMAX"] ## 1) boundary information and objective function nv = NX_MG * T lb = zeros(nv) ub = zeros(nv) c = zeros(nv) q = zeros(nv) vtypes = ["c"] * nv for t in range(T): ## 1.1) lower boundary lb[t * NX_MG + PG] = 0 lb[t * NX_MG + QG] = mg["DG"]["QMIN"] lb[t * NX_MG + PUG] = 0 lb[t * NX_MG + QUG] = mg["UG"]["QMIN"] lb[t * NX_MG + PBIC_DC2AC] = 0 lb[t * NX_MG + PBIC_AC2DC] = 0 lb[t * NX_MG + QBIC] = -mg["BIC"]["SMAX"] lb[t * NX_MG + PESS_CH] = 0 lb[t * NX_MG + PESS_DC] = 0 lb[t * NX_MG + EESS] = mg["ESS"]["EMIN"] lb[t * NX_MG + PPV] = 0 lb[t * NX_MG + PMESS] = pmess_l ## 1.2) upper boundary ub[t * NX_MG + PG] = mg["DG"]["PMAX"] ub[t * NX_MG + QG] = mg["DG"]["QMAX"] ub[t * NX_MG + PUG] = mg["UG"]["PMAX"] ub[t * NX_MG + QUG] = mg["UG"]["QMAX"] ub[t * NX_MG + PBIC_DC2AC] = mg["BIC"]["PMAX"] ub[t * NX_MG + PBIC_AC2DC] = mg["BIC"]["PMAX"] ub[t * NX_MG + QBIC] = mg["BIC"]["SMAX"] ub[t * NX_MG + PESS_CH] = mg["ESS"]["PCH_MAX"] ub[t * NX_MG + PESS_DC] = mg["ESS"]["PDC_MAX"] ub[t * NX_MG + EESS] = mg["ESS"]["EMAX"] ub[t * NX_MG + PPV] = mg["PV"]["PROFILE"][t] ub[t * NX_MG + PMESS] = pmess_u ## 1.3) Objective functions c[t * NX_MG + PG] = mg["DG"]["COST_A"] c[t * NX_MG + PESS_CH] = mg["ESS"]["COST_OP"] c[t * NX_MG + PESS_DC] = mg["ESS"]["COST_OP"] c[t * NX_MG + PPV] = mg["PV"]["COST"] # c[t * NX_MG + PBIC_AC2DC] = mg["ESS"]["COST_OP"] # c[t * NX_MG + PBIC_DC2AC] = mg["ESS"]["COST_OP"] # c[t * NX_MG + PUG] = mg["DG"]["COST_A"] # c[t * NX_MG + PMESS] = 0.001 ## 1.4) Upper and lower boundary information if t == T - 1: lb[t * NX_MG + EESS] = mg["ESS"]["E0"] ub[t * NX_MG + EESS] = mg["ESS"]["E0"] # 2) Formulate the equal constraints # 2.1) Power balance equation # a) AC bus equation Aeq = lil_matrix((T, nv)) beq = zeros(T) for t in range(T): Aeq[t, t * NX_MG + PG] = 1 Aeq[t, t * NX_MG + PUG] = 1 Aeq[t, t * NX_MG + PBIC_AC2DC] = -1 Aeq[t, t * NX_MG + PBIC_DC2AC] = mg["BIC"]["EFF_DC2AC"] beq[t] = mg["PD"]["AC"][t] # b) DC bus equation Aeq_temp = lil_matrix((T, nv)) beq_temp = zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + PBIC_AC2DC] = mg["BIC"]["EFF_AC2DC"] Aeq_temp[t, t * NX_MG + PBIC_DC2AC] = -1 Aeq_temp[t, t * NX_MG + PESS_CH] = -1 Aeq_temp[t, t * NX_MG + PESS_DC] = 1 Aeq_temp[t, t * NX_MG + PPV] = 1 Aeq_temp[t, t * NX_MG + PMESS] = 1 # The power injection from mobile energy storage systems beq_temp[t] = mg["PD"]["DC"][t] Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # c) AC reactive power balance equation Aeq_temp = lil_matrix((T, nv)) beq_temp = 
zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + QUG] = 1 Aeq_temp[t, t * NX_MG + QBIC] = 1 Aeq_temp[t, t * NX_MG + QG] = 1 beq_temp[t] = mg["QD"]["AC"][t] Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 2.2) Energy storage balance equation Aeq_temp = lil_matrix((T, nv)) beq_temp = zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + EESS] = 1 Aeq_temp[t, t * NX_MG + PESS_CH] = -mg["ESS"]["EFF_CH"] Aeq_temp[t, t * NX_MG + PESS_DC] = 1 / mg["ESS"]["EFF_DC"] if t == 0: beq_temp[t] = mg["ESS"]["E0"] else: Aeq_temp[t, (t - 1) * NX_MG + EESS] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 3) Formualte inequality constraints # There is no inequality constraint. # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lb, xmax=ub) model_micro_grid = {"c": c, "q": q, "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq } return model_micro_grid def problem_formulation_tess(self, mess, tns): """ Problem formulation for transportation energy storage scheduling, including vehicle routine problem and etc. :param tess: specific tess information :param traffic_network: transportation network information :return: """ nb_tra = self.nb_tra T = self.T nb = self.nb nl_tra = tns["branch"].shape[0] # Formulate the connection matrix between the transportation networks and power networks connection_matrix = zeros(((2 * nl_tra + nb_tra) * T, 4)) weight = zeros((2 * nl_tra + nb_tra) * T) for i in range(T): for j in range(nl_tra): # Add from matrix connection_matrix[i * (2 * nl_tra + nb_tra) + j, F_BUS] = tns["branch"][j, F_BUS] + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + j, T_BUS] = tns["branch"][j, T_BUS] + \ tns["branch"][j, TIME] * nb_tra + i * nb_tra weight[i * (2 * nl_tra + nb_tra) + j] = 1 connection_matrix[i * (2 * nl_tra + nb_tra) + j, TIME] = tns["branch"][j, TIME] for j in range(nl_tra): # Add to matrix connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, F_BUS] = tns["branch"][j, T_BUS] + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, T_BUS] = tns["branch"][j, F_BUS] + \ tns["branch"][j, TIME] * nb_tra + \ i * nb_tra weight[i * (2 * nl_tra + nb_tra) + j + nl_tra] = 1 connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, TIME] = tns["branch"][j, TIME] for j in range(nb_tra): connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, F_BUS] = j + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, T_BUS] = j + (i + 1) * nb_tra if tns["bus"][j, LOCATION] >= 0: connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, 3] = tns["bus"][j, LOCATION] + i * nb # Delete the out of range lines index = find(connection_matrix[:, T_BUS] < T * nb_tra) connection_matrix = connection_matrix[index, :] weight = weight[index] # add two virtual nodes to represent the initial and end status of vehicles # special attention should be paid here, as the original index has been modified! 
connection_matrix[:, F_BUS] += 1 connection_matrix[:, T_BUS] += 1 # From matrix temp = zeros((nb_tra, 4)) weight_temp = zeros(nb_tra) for i in range(nb_tra): temp[i, 1] = i + 1 connection_matrix = concatenate([temp, connection_matrix]) weight = concatenate([weight_temp, weight]) # To matrix for i in range(nb_tra): temp = zeros((1, 4)) temp[0, 0] = nb_tra * (T - 1) + i + 1 temp[0, 1] = nb_tra * T + 1 if tns["bus"][i, LOCATION] >= 0: temp[0, 3] = tns["bus"][i, LOCATION] + (T - 1) * nb connection_matrix = concatenate([connection_matrix, temp]) weight_temp = zeros(1) weight = concatenate([weight, weight_temp]) # Status transition matrix nl_tra = connection_matrix.shape[0] # 0 represents that, the bus is not within the power networks nb_tra_ele = sum((tns["bus"][:, 2]) >= 0) status_matrix = zeros((T, nl_tra)) for i in range(T): for j in range(nl_tra): if connection_matrix[j, F_BUS] >= i * nb_tra + 1 and connection_matrix[j, F_BUS] < (i + 1) * nb_tra + 1: status_matrix[i, j] = 1 if connection_matrix[j, F_BUS] <= i * nb_tra + 1 and connection_matrix[j, T_BUS] > (i + 1) * nb_tra + 1: status_matrix[i, j] = 1 # Update connection matrix connection_matrix_f = zeros((T * nb_tra + 2, nl_tra)) connection_matrix_t = zeros((T * nb_tra + 2, nl_tra)) for i in range(T * nb_tra + 2): connection_matrix_f[i, find(connection_matrix[:, F_BUS] == i)] = 1 connection_matrix_t[i, find(connection_matrix[:, T_BUS] == i)] = 1 n_stops = find(connection_matrix[:, 3]).__len__() assert n_stops == nb_tra_ele * T, "The number of bus stop is not right!" nv_tra = nl_tra + 4 * n_stops # Status transition, discharging status, charging rate, discharging rate, spinning reserve lx = zeros(nv_tra) ux = ones(nv_tra) self.nv_tra = nv_tra self.nl_tra = nl_tra self.n_stops = n_stops self.nb_tra_ele = nb_tra_ele self.connection_matrix = connection_matrix ux[nl_tra + 0 * n_stops:nl_tra + 1 * n_stops] = 1 # The locations ux[nl_tra + 1 * n_stops:nl_tra + 2 * n_stops] = mess["PDMAX"] ux[nl_tra + 2 * n_stops:nl_tra + 3 * n_stops] = mess["PCMAX"] ux[nl_tra + 3 * n_stops:nl_tra + 4 * n_stops] = mess["PCMAX"] + mess["PDMAX"] # The initial location and stop location lx[find(connection_matrix[:, F_BUS] == 0)] = mess["initial"] ux[find(connection_matrix[:, F_BUS] == 0)] = mess["initial"] lx[find(connection_matrix[:, T_BUS] == T * nb_tra + 1)] = mess["end"] ux[find(connection_matrix[:, T_BUS] == T * nb_tra + 1)] = mess["end"] vtypes = ["b"] * nl_tra + ["b"] * n_stops + ["c"] * 3 * n_stops Aeq = connection_matrix_f - connection_matrix_t beq = zeros(T * nb_tra + 2) beq[0] = 1 beq[-1] = -1 # statue constraints Aeq_temp = status_matrix beq_temp = ones(T) Aeq = concatenate([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) neq_traffic = Aeq.shape[0] # Fulfill the missing zeros Aeq = concatenate([Aeq, zeros((neq_traffic, 4 * n_stops))], axis=1) ## Inequality constraints index_stops = find(connection_matrix[:, 3]) index_operation = arange(n_stops) power_limit = sparse((ones(n_stops), (index_operation, index_stops)), (n_stops, nl_tra)) # This mapping matrix plays an important role in the connection between the power network and traffic network ## 1) Stopping status A = zeros((3 * n_stops, nv_tra)) # Charging, discharging status, RBS # Discharging A[0:n_stops, 0: nl_tra] = -power_limit.toarray() * mess["PDMAX"] A[0:n_stops, nl_tra + n_stops: nl_tra + 2 * n_stops] = eye(n_stops) # Charging A[n_stops:n_stops * 2, 0: nl_tra] = -power_limit.toarray() * mess["PCMAX"] A[n_stops:n_stops * 2, nl_tra + 2 * n_stops:nl_tra + 3 * n_stops] = eye(n_stops) # spinning 
reserve A[n_stops * 2: n_stops * 3, 0: nl_tra] = -power_limit.toarray() * (mess["PCMAX"] + mess["PDMAX"]) A[n_stops * 2:n_stops * 3, nl_tra + 3 * n_stops:nl_tra + 4 * n_stops] = eye(n_stops) b = zeros(3 * n_stops) ## 2) Operating status Arange = zeros((2 * n_stops, nv_tra)) brange = zeros(2 * n_stops) # 1) Pdc<(1-Ic)*Pdc_max Arange[0: n_stops, nl_tra:nl_tra + n_stops] = eye(n_stops) * mess["PDMAX"] Arange[0: n_stops, nl_tra + n_stops: nl_tra + n_stops * 2] = eye(n_stops) brange[0: n_stops] = ones(n_stops) * mess["PDMAX"] # 2) Pc<Ic*Pch_max Arange[n_stops:n_stops * 2, nl_tra: nl_tra + n_stops] = -eye(n_stops) * mess["PCMAX"] Arange[n_stops:n_stops * 2, nl_tra + n_stops * 2: nl_tra + n_stops * 3] = eye(n_stops) A = concatenate([A, Arange]) b = concatenate([b, brange]) ## 2) Power limitation Areserve = zeros((2 * n_stops, nv_tra)) breserve = zeros(2 * n_stops) # 1) Pdc-Pc+Rbs<=Pdc_max Areserve[0: n_stops, nl_tra + n_stops: nl_tra + n_stops * 2] = eye(n_stops) Areserve[0: n_stops, nl_tra + n_stops * 2:nl_tra + n_stops * 3] = -eye(n_stops) Areserve[0: n_stops, nl_tra + n_stops * 3:nl_tra + n_stops * 4] = eye(n_stops) breserve[0: n_stops] = ones(n_stops) * mess["PDMAX"] # 2) Pc-Pdc+Rbs<=Pc_max Areserve[n_stops:n_stops * 2, nl_tra + n_stops: nl_tra + n_stops * 2] = - eye(n_stops) Areserve[n_stops:n_stops * 2, nl_tra + n_stops * 2:nl_tra + n_stops * 3] = eye(n_stops) Areserve[n_stops:n_stops * 2, nl_tra + n_stops * 3:nl_tra + n_stops * 4] = eye(n_stops) breserve[n_stops:n_stops * 2] = ones(n_stops) * mess["PCMAX"] A = concatenate([A, Areserve]) b = concatenate([b, breserve]) # Add constraints on the energy status Aenergy = zeros((2 * T, nv_tra)) benergy = zeros(2 * T) for j in range(T): # minimal energy Aenergy[j, nl_tra + n_stops:nl_tra + n_stops + (j + 1) * nb_tra_ele] = 1 / mess["EFF_DC"] Aenergy[j, nl_tra + 2 * n_stops:nl_tra + 2 * n_stops + (j + 1) * nb_tra_ele] = -mess["EFF_CH"] # Aenergy[j, NX_status + 3 * n_stops + (j + 1) * nb_traffic_electric - 1] = 0.5 if j != (T - 1): benergy[j] = mess["E0"] - mess["EMIN"] else: benergy[j] = 0 # maximal energy Aenergy[T + j, nl_tra + n_stops: nl_tra + n_stops + (j + 1) * nb_tra_ele] = -1 / mess["EFF_DC"] Aenergy[T + j, nl_tra + 2 * n_stops:nl_tra + 2 * n_stops + (j + 1) * nb_tra_ele] = mess["EFF_CH"] if j != (T - 1): benergy[T + j] = mess["EMAX"] - mess["E0"] else: benergy[T + j] = 0 A = concatenate([A, Aenergy]) b = concatenate([b, benergy]) c = concatenate([connection_matrix[:, TIME], zeros(n_stops * 4)]) # sol = milp(zeros(NX_traffic), q=zeros(NX_traffic), Aeq=Aeq, beq=beq, A=A, b=b, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv_tra), "lb": lx, "ub": ux, "vtypes": vtypes, "A": A, "b": b, "Aeq": Aeq, "beq": beq, "NV": nv_tra, } return model_tess def problem_formulation_tess_second_stage(self, mess): """ Problem formulation for transportation energy storage scheduling, including vehicle routine problem and etc. 
:param tess: specific tess information :param traffic_network: transportation network information :return: """ T = self.T n_stops = self.n_stops # Number of stops in nb_tra_ele = self.nb_tra_ele nv = 2 * n_stops + T # Status transition, charging status, charging rate, discharging rate, spinning reserve lb = zeros(nv) ub = zeros(nv) lb[n_stops * 2:nv] = mess["EMIN"] ub[n_stops * 0:n_stops * 1] = mess["PDMAX"] ub[n_stops * 1:n_stops * 2] = mess["PCMAX"] ub[n_stops * 2:nv] = mess["EMAX"] lb[-1] = mess["E0"] # energy storage systems end status ub[-1] = mess["E0"] # energy storage systems end status vtypes = ["c"] * nv # The energy status dynamics Aeq = zeros((T, nv)) beq = zeros(T) for t in range(T): Aeq[t, n_stops * 2 + t] = 1 Aeq[t, n_stops + nb_tra_ele * t:n_stops + nb_tra_ele * (t + 1)] = -mess["EFF_CH"] Aeq[t, nb_tra_ele * t:nb_tra_ele * (t + 1)] = 1 / mess["EFF_DC"] if t == 0: beq[t] = mess["E0"] else: Aeq[t, n_stops * 2 + t - 1] = -1 c = concatenate((ones(n_stops * 2) * mess["COST_OP"], zeros(T))) # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv), "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "NX": nv, } return model_tess def scenario_generation_reduction(self, micro_grids, profile, pns, pv_profile, update=0, ns=2, ns_reduced=2, std=0.03, interval=0.05, std_pv=0.05): """ Scenario generation function for the second-stage scheduling Stochastic variables include 1) loads in distribution networks, active loads for 2) AC bus and 3)DC bus. The assumption is that, the 1) loads in distribution networks follow normal distribution nb*T 2) loads for AC bus and DC bus follow uniform distribution nmg*T*4 :return: """ T = self.T nmg = self.nmg nb = self.nb db_management = DataBaseManagement() if update > 0: # 1) scenario generation bus_load = zeros((ns, nb * T)) mg_load = zeros((ns, nmg * T * 2)) mg_pv = zeros((ns, nmg * T)) weight = ones(ns) / ns for i in range(ns): for t in range(T): for j in range(nb): bus_load[i, t * nb + j] = pns["bus"][j, PD] * (1 + random.normal(0, std)) * profile[t] pv_rand = random.normal(0, std_pv) # all PV are correlated! 
for j in range(nmg): mg_load[i, t * nmg + j] = micro_grids[j]["PD"]["AC"][t] * \ (1 + random.uniform(-interval, interval)) mg_load[i, nmg * T + t * nmg + j] = micro_grids[j]["PD"]["DC"][t] * \ (1 + random.uniform(-interval, interval)) mg_pv[i, t * nmg + j] = micro_grids[j]["PV"]["PMAX"] * pv_profile[t] * \ (1 + pv_rand) # 2) scenario reduction scenario_reduction = ScenarioReduction() (scenario_reduced, weight_reduced) = \ scenario_reduction.run(scenario=concatenate([bus_load, mg_load, mg_pv], axis=1), weight=weight, n_reduced=ns_reduced, power=2) # 3) Store the data into database db_management.create_table("scenarios", nb=nb, nmg=nmg) for i in range(ns - ns_reduced): for t in range(T): # print(scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist()) db_management.insert_data_scenario("scenarios", scenario=i, weight=weight_reduced[i], time=t, nb=nb, pd=scenario_reduced[i, t * nb:(t + 1) * nb].tolist(), nmg=nmg, pd_ac=scenario_reduced[i, nb * T + t * nmg: nb * T + (t + 1) * nmg].tolist(), pd_dc=scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist(), ppv=scenario_reduced[i, nb * T + nmg * T * 2 + t * nmg: nb * T + nmg * T * 2 + (t + 1) * nmg].tolist()) # print(t) else: # 4) if not updated, inquery the database scenario_reduced = zeros((ns - ns_reduced, nb * T + nmg * T * 3)) weight_reduced = zeros(ns - ns_reduced) for i in range(ns - ns_reduced): for t in range(T): data = db_management.inquery_data_scenario(table_name="scenarios", scenario=i, time=t) weight_reduced[i] = data[1] scenario_reduced[i, nb * t:nb * (t + 1)] = array(data[3:nb + 3]) scenario_reduced[i, nb * T + nmg * t:nb * T + nmg * (t + 1)] = array(data[nb + 3:nb + 3 + nmg]) scenario_reduced[i, nb * T + nmg * T + nmg * t:nb * T + nmg * T + nmg * (t + 1)] = \ array(data[nb + 3 + nmg:nb + 3 + nmg * 2]) scenario_reduced[i, nb * T + nmg * T * 2 + nmg * t:nb * T + nmg * T * 2 + nmg * (t + 1)] = \ array(data[nb + 3 + nmg * 2:nb + 3 + nmg * 3]) # assert sum(weight_reduced) == 1, "The weight factor is not right!" # 4) return value ds_load_profile = scenario_reduced[:, 0:nb * T] mgs_load_profile = scenario_reduced[:, nb * T:nb * T + nmg * T * 2] pv_load_profile = scenario_reduced[:, nb * T + nmg * T * 2:] # profile_second_stage = zeros((ns, T)) microgrids_second_stage = [0] * (ns - ns_reduced) # for i in range(ns): # for j in range(T): # profile_second_stage[i, j] = profile[j] * (1 + 0.5 * random.random()) # for i in range(ns - ns_reduced): microgrids_second_stage[i] = deepcopy(micro_grids) for j in range(nmg): microgrids_second_stage[i][j]["PV"]["PROFILE"] = zeros(T) for t in range(T): microgrids_second_stage[i][j]["PD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] microgrids_second_stage[i][j]["QD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] * 0.2 microgrids_second_stage[i][j]["PD"]["DC"][t] = mgs_load_profile[i, T * nmg + t * nmg + j] microgrids_second_stage[i][j]["PV"]["PROFILE"][t] = pv_load_profile[i, t * nmg + j] return ds_load_profile, microgrids_second_stage, weight_reduced if __name__ == "__main__":
mpc = case33.case33() # Default test case
0
2023-11-27 15:57:53+00:00
24k
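The TESS record above builds its second-stage energy dynamics as a block of equality constraints: for each period t, the stored energy E_t must equal E_{t-1} plus EFF_CH times the charged power minus the discharged power divided by EFF_DC, with the t = 0 row pinned to the initial level E0. A minimal standalone sketch of that Aeq/beq construction, assuming a single stop per period and purely illustrative parameter values (the decision-vector layout [Pdc | Pc | E] mirrors the record's ordering; none of the numbers come from the dataset):

import numpy as np

# Illustrative values only -- not taken from the record above
T = 4                     # scheduling horizon
eff_ch, eff_dc = 0.95, 0.95
E0 = 10.0                 # initial stored energy

# Decision vector x = [Pdc_0..Pdc_{T-1} | Pc_0..Pc_{T-1} | E_0..E_{T-1}]
nv = 3 * T
Aeq = np.zeros((T, nv))
beq = np.zeros(T)
for t in range(T):
    Aeq[t, 2 * T + t] = 1.0           # +E_t
    Aeq[t, t] = 1.0 / eff_dc          # +Pdc_t / EFF_DC
    Aeq[t, T + t] = -eff_ch           # -EFF_CH * Pc_t
    if t == 0:
        beq[t] = E0                   # E_0 + Pdc_0/eff_dc - eff_ch*Pc_0 = E0
    else:
        Aeq[t, 2 * T + t - 1] = -1.0  # -E_{t-1}
# Aeq @ x = beq now encodes E_t = E_{t-1} + eff_ch*Pc_t - Pdc_t/eff_dc for every t.

With n_stops stops and nb_tra_ele electrified nodes per period, the record's loop plays out the same recursion, only with the charging and discharging coefficients spread over the per-node columns.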
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
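The GameEnv.get_infoset snippet above reconstructs the opponents' unseen cards by multiset subtraction: the full 54-card deck minus every card already played minus the acting player's own hand. A small self-contained sketch of that bookkeeping, assuming the repo's integer card encoding (3-14, 17 for 2, 20/30 for the jokers); the hands used below are made up for illustration:

from collections import Counter

# Full deck in the repo's integer encoding: four of each rank plus the two jokers
ALL_ENV_CARD = [c for c in range(3, 15) for _ in range(4)] + [17] * 4 + [20, 30]

def other_hand_cards(my_hand, played_cards):
    """Cards the two opponents must still hold: deck - my hand - everything played."""
    remaining = Counter(ALL_ENV_CARD)
    remaining.subtract(my_hand)
    remaining.subtract(played_cards)
    # Flatten the counter back into a sorted list, as the env does
    return sorted(card for card, n in remaining.items() for _ in range(n))

# Illustrative usage
my_hand = [3, 3, 4, 5, 17, 20]
played = [5, 5, 5, 30]
unseen = other_hand_cards(my_hand, played)
assert len(unseen) == 54 - len(my_hand) - len(played)

The main.py code later in this record applies the same difference at game start and then splits the remainder between the two hidden seats; as its comment notes (如何分配对AI判断没有影响), how that split is made does not affect the AI's judgment.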
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop, Qt from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent
15,028
f.write(str(int(time.time())) + " " + cards_str + " " + str(round(win_rate, 2)) + "\n") print("叫牌预估胜率:", win_rate) self.BidWinrate.setText("叫牌胜率:" + str(round(win_rate, 2)) + "%") if jiaodizhu_btn is not None: print("找到《叫地主》按钮", jiaodizhu_btn) HaveBid = True print(win_rate, self.BidThreshold1) if win_rate > self.BidThreshold1: helper.ClickOnImage("jiaodizhu_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("bujiao_btn", region=self.GeneralBtnPos) self.sleep(500) if qiangdizhu_btn is not None: print("找到《抢地主》按钮", qiangdizhu_btn) HaveBid = True if win_rate > self.BidThreshold2: is_stolen = 1 helper.ClickOnImage("qiangdizhu_btn", region=self.GeneralBtnPos) else: print("点《不抢》") helper.ClickOnImage("buqiang_btn", region=self.GeneralBtnPos) self.sleep(500) if jiabei_btn is not None: self.sleep(500) break self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') laotou = helper.LocateOnScreen("laotou", region=(761, 45, 255, 100)) while laotou is not None: self.detect_start_btn() if not self.RunGame: break self.sleep(200) print("在游戏里,还在抢地主。。。。") self.label.setText("在抢地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') print("底牌现身。。。") self.label.setText("抢完地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') self.sleep(200) llcards = self.find_landlord_cards() while len(llcards) != 3: self.detect_start_btn() if not self.RunGame: break if len(llcards) > 3: self.ThreeLandlordCardsConfidence += 0.05 time.sleep(200) elif len(llcards) < 3: self.ThreeLandlordCardsConfidence -= 0.05 time.sleep(200) llcards = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + llcards) print("地主牌:", llcards) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) if len(cards_str) == 20: win_rate = LandlordModel.predict(cards_str) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") print("预估地主胜率:", win_rate) else: user_position_code = self.find_landlord(self.LandlordFlagPos) print(user_position_code) while user_position_code is None: self.detect_start_btn() if not self.RunGame: break user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) user_position = ['up', 'landlord', 'down'][user_position_code] win_rate = FarmerModel.predict(cards_str, llcards, user_position) - 5 print("预估农民胜率:", win_rate) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") self.sleep(500) if win_rate > self.JiabeiThreshold[is_stolen][0]: chaojijiabei_btn = helper.LocateOnScreen("chaojijiabei_btn", region=self.GeneralBtnPos) if chaojijiabei_btn is not None: helper.ClickOnImage("chaojijiabei_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) elif win_rate > self.JiabeiThreshold[is_stolen][1]: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) else: helper.ClickOnImage("bujiabei_btn", region=self.GeneralBtnPos) self.sleep(500) if win_rate > self.MingpaiThreshold: self.sleep(1000) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) while mingpai_btn is None: print('没找到《明牌》按钮') self.sleep(200) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) helper.ClickOnImage("mingpai_btn", region=self.GeneralBtnPos) self.sleep(500) print("加倍环节已结束") 
def animation(self, cards):
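The cropped_code in this record drives the bidding and doubling phase with a chain of threshold checks on a predicted win rate: one cut-off for calling the landlord, a higher one for snatching it, and a pair of tiers, selected by whether the seat was snatched, for super-double versus double. A minimal sketch of that decision logic stripped of the screen-scraping and click calls; the threshold values mirror the ones configured in this record, while the function names are illustrative:

BID_THRESHOLD_CALL = 65      # 叫地主 threshold
BID_THRESHOLD_SNATCH = 72    # 抢地主 threshold
JIABEI_THRESHOLD = (
    (85, 72),  # (super double, double) when the landlord seat was not snatched
    (85, 75),  # (super double, double) when the landlord seat was snatched
)

def bid_decision(win_rate, snatch_phase=False):
    """Return the button the bot would press during bidding."""
    if snatch_phase:
        return "qiangdizhu" if win_rate > BID_THRESHOLD_SNATCH else "buqiang"
    return "jiaodizhu" if win_rate > BID_THRESHOLD_CALL else "bujiao"

def multiply_decision(win_rate, is_stolen):
    """Return the doubling choice once the pre-game win rate is known."""
    super_t, double_t = JIABEI_THRESHOLD[is_stolen]
    if win_rate > super_t:
        return "chaojijiabei"   # super double
    if win_rate > double_t:
        return "jiabei"         # double
    return "bujiabei"           # no double

# e.g. bid_decision(70) -> "jiaodizhu"; multiply_decision(80, is_stolen=1) -> "jiabei"

In the real loop the super-double branch falls back to a plain double when the chaojijiabei button is not found on screen, and a separate MingpaiThreshold (92 in this record) gates the show-hand click afterwards.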
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position]) self.env = GameEnv(ai_players) try: self.start() except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() print(e) traceback.print_tb(exc_tb) # self.stop() def sleep(self, ms): self.counter.restart() while self.counter.elapsed() < ms: QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50) def start(self): # print("现在的出牌顺序是谁:0是我;1是下家;2是上家:", self.play_order) self.env.card_play_init(self.card_play_data_list) print("开始出牌\n") while not self.env.game_over: self.detect_start_btn() if not self.RunGame: break if self.play_order == 0: self.PredictedCard.setText("...") action_message = self.env.step(self.user_position) score = float(action_message['win_rate']) if "resnet" in self.card_play_model_path_dict[self.user_position]: score *= 8 self.UserHandCards.setText("手牌:" + str(''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]))[::-1]) self.PredictedCard.setText(action_message["action"] if action_message["action"] else "不出") self.PredictedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') self.WinRate.setText("评分:" + action_message["win_rate"]) print("出牌:", action_message["action"] if action_message["action"] else "不出", ",得分:", action_message["win_rate"]) print("\n手牌:", str(''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]))) if action_message["action"] == "": result = helper.LocateOnScreen("pass_btn", region=self.PassBtnPos, confidence=0.7) passSign = helper.LocateOnScreen("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) while result is None and passSign is None: self.detect_start_btn() if not self.RunGame: break result = helper.LocateOnScreen("pass_btn", region=self.PassBtnPos, confidence=0.7) passSign = helper.LocateOnScreen("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) if result is not None: helper.ClickOnImage("pass_btn", region=self.PassBtnPos, confidence=0.7) self.sleep(100) if passSign is not None: self.sleep(100) 
helper.ClickOnImage("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) # helper.LeftClick((940, 640)) self.sleep(200) else: hand_cards_str = ''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) while result is None: self.detect_start_btn() if not self.RunGame: break print("等待出牌按钮") self.sleep(200) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) self.click_cards(action_message["action"]) self.sleep(200) helper.ClickOnImage("play_card", region=self.PassBtnPos, confidence=0.7) self.sleep(200) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) if result is not None: self.click_cards(action_message["action"]) ani = self.animation(action_message["action"]) if ani: self.sleep(800) if len(hand_cards_str) == 0: self.RunGame = False try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") print("程序走到这里") except AttributeError as e: traceback.print_exc() break self.PredictedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.sleep(200) self.play_order = 1 elif self.play_order == 1: self.RPlayedCard.setText("等待下家出牌") self.RPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') pass_flag = helper.LocateOnScreen('buchu', region=self.RPassPos) rightCards = self.find_other_cards(self.RPlayedCardsPos) while self.RunGame and len(rightCards) == 0 and pass_flag is None: self.detect_start_btn() print("等待下家出牌") self.sleep(100) pass_flag = helper.LocateOnScreen('buchu', region=self.RPassPos) rightCards = self.find_other_cards(self.RPlayedCardsPos) self.sleep(10) # 未找到"不出" if pass_flag is None: # 识别下家出牌 while True: self.detect_start_btn() if not self.RunGame: break rightOne = self.find_other_cards(self.RPlayedCardsPos) self.sleep(50) rightTwo = self.find_other_cards(self.RPlayedCardsPos) if rightOne == rightTwo: self.other_played_cards_real = rightOne if "X" in rightOne or "D" in rightOne: self.sleep(500) self.other_played_cards_real = self.find_other_cards(self.RPlayedCardsPos) ani = self.animation(rightOne) if ani: self.RPlayedCard.setText("等待动画") self.sleep(500) break # self.RBrowser.append(self.other_played_cards_real) # 找到"不出" else: self.other_played_cards_real = "" print("\n下家出牌:", self.other_played_cards_real) self.other_played_cards_env = [RealCard2EnvCard[c] for c in list(self.other_played_cards_real)] self.other_played_cards_env.sort() self.env.step(self.user_position, self.other_played_cards_env) # 更新界面 self.RPlayedCard.setText(self.other_played_cards_real if self.other_played_cards_real else "不出") self.RPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') # self.other_hands_cards_str = self.other_hands_cards_str.replace(self.other_played_cards_real, "", 1) self.other_hands_cards_str = self.subtract_strings(self.other_hands_cards_str, self.other_played_cards_real) # print("记牌器:", self.other_hands_cards_str) self.cards_recorder(self.other_hands_cards_str) self.sleep(200) self.play_order = 2 elif self.play_order == 2: self.LPlayedCard.setText("等待上家出牌") self.LPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') pass_flag = helper.LocateOnScreen('buchu', region=self.LPassPos) leftCards = self.find_other_cards(self.LPlayedCardsPos) while len(leftCards) == 0 and pass_flag is None: self.detect_start_btn() if not self.RunGame: break print("等待上家出牌") 
self.sleep(100) pass_flag = helper.LocateOnScreen('buchu', region=self.LPassPos) leftCards = self.find_other_cards(self.LPlayedCardsPos) self.sleep(10) if pass_flag is None: # 识别上家出牌 while True: self.detect_start_btn() if not self.RunGame: break leftOne = self.find_other_cards(self.LPlayedCardsPos) self.sleep(50) leftTwo = self.find_other_cards(self.LPlayedCardsPos) if leftOne == leftTwo: self.other_played_cards_real = leftOne if "X" in leftOne or "D" in leftOne: self.sleep(500) self.other_played_cards_real = self.find_other_cards(self.LPlayedCardsPos) ani = self.animation(leftOne) if ani: self.LPlayedCard.setText("等待动画") self.sleep(500) break # self.LBrowser.append(self.other_played_cards_real) # 找到"不出" else: self.other_played_cards_real = "" print("\n上家出牌:", self.other_played_cards_real) self.other_played_cards_env = [RealCard2EnvCard[c] for c in list(self.other_played_cards_real)] self.other_played_cards_env.sort() self.env.step(self.user_position, self.other_played_cards_env) # 更新界面 self.LPlayedCard.setText(self.other_played_cards_real if self.other_played_cards_real else "不出") self.LPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.other_hands_cards_str = self.subtract_strings(self.other_hands_cards_str, self.other_played_cards_real) # print("记牌器:", self.other_hands_cards_str) self.cards_recorder(self.other_hands_cards_str) self.sleep(300) self.play_order = 0 if self.loop_sign == 0: self.stop() print("这里有问题") self.label.setText("游戏结束") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.init_display() def detect_start_btn(self): beans = [(308, 204, 254, 60), (295, 474, 264, 60), (882, 203, 230, 60)] for i in beans: result = helper.LocateOnScreen("over", region=i, confidence=0.9) if result is not None: print("找到游戏结束的豆子,游戏已结束") self.RunGame = False self.init_display() try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") print("程序走到这里") except AttributeError as e: traceback.print_exc() break result = helper.LocateOnScreen("continue", region=(1100, 617, 200, 74)) if result is not None: if self.loop_sign == 0: print("检测到本局游戏已结束") self.label.setText("游戏已结束") self.stop() else: self.RunGame = True helper.ClickOnImage("continue", region=(1100, 617, 200, 74)) self.sleep(100) try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() result = helper.LocateOnScreen("start_game", region=(720, 466, 261, 117)) if result is not None: helper.ClickOnImage("start_game", region=(720, 466, 261, 117)) self.sleep(1000) result = helper.LocateOnScreen("sure", region=(657, 500, 216, 72)) if result is not None: helper.ClickOnImage("sure", region=(657, 500, 216, 72)) self.sleep(1000) result = helper.LocateOnScreen("good", region=(434, 599, 219, 88)) if result is not None: helper.ClickOnImage("good", region=(434, 599, 219, 88)) self.sleep(1000) result = helper.LocateOnScreen("zhidao", region=(593, 543, 224, 94)) if result is not None: helper.ClickOnImage("zhidao", region=(593, 543, 224, 94)) self.sleep(1000) result = helper.LocateOnScreen("chacha", region=(930, 150, 454, 504), confidence=0.7) if result is not None: helper.ClickOnImage("chacha", region=(930, 150, 454, 504), confidence=0.7) self.sleep(1000) def cards_filter(self, location, distance): # 牌检测结果滤波 if len(location) == 0: return 0 locList = [location[0][0]] 
poslist = [location[0]] count = 1 for e in location: flag = 1 # “是新的”标志 for have in locList: # print(abs(e[0] - have)) if abs(e[0] - have) <= distance: flag = 0 break if flag: count += 1 locList.append(e[0]) poslist.append(e) # print(poslist) return count, poslist def find_cards(self, img, pos, mark="", confidence=0.8): cards_real = "" D_king = 0 X_king = 0 for card in AllCards: result = gh.LocateAllOnImage(img, helper.PicsCV[mark + card], region=pos, confidence=confidence) if len(result) > 0: count, s = self.cards_filter(list(result), 30) if card == "X" or card == "D": for a in s: classifier = DC.ColorClassify(debug=True) img1 = img[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] img2 = img1[a[1]:a[1] + a[3] - 50, a[0]:a[0] + 20] # 注意连着裁切时img不能重名 # gh.ShowImg(img2) result = classifier.classify(img2) # print(result) for b in result: if b[0] == "Red": if b[1] > 0.54: D_king = 1 else: X_king = 1 else: cards_real += card[0] * count if X_king: cards_real += "X" cards_real = cards_real[-1] + cards_real[:-1] if D_king: cards_real += "D" cards_real = cards_real[-1] + cards_real[:-1] return cards_real def find_my_cards(self): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) my_cards_real = self.find_cards(img, self.MyHandCardsPos, mark="m") return my_cards_real def find_other_cards(self, pos): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) other_cards_real = self.find_cards(img, pos, mark="o") return other_cards_real def find_landlord_cards(self): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) landlord__cards_real = self.find_cards(img, self.LandlordCardsPos, mark="z", confidence=self.ThreeLandlordCardsConfidence) return landlord__cards_real def click_cards(self, out_cards): cards = self.find_my_cards() num = len(cards) space = 45.6 res1 = helper.LocateOnScreen("up_left", region=self.MyHandCardsPos, confidence=0.65) while res1 is None: self.detect_start_btn() if not self.RunGame: break print("未找到手牌区域") self.sleep(500) res1 = helper.LocateOnScreen("up_left", region=self.MyHandCardsPos, confidence=0.65) pos = res1[0] + 6, res1[1] + 7 res2 = helper.LocateOnScreen("up_left", region=(180, 580, 1050, 90), confidence=0.65) if res2 is not None: pos = res1[0] + 6, res1[1] + 7 pos_list = [(int(pos[0] + i * space), pos[1]) for i in range(num)] # 将两个列表合并转为字典 cards_dict = defaultdict(list) for key, value in zip(cards, pos_list): cards_dict[key].append(value) # 转换为普通的字典 cards_dict = dict(cards_dict) remove_dict = {key: [] for key in cards_dict.keys()} # print(cards_dict) if out_cards == "DX": helper.LeftClick2((cards_dict["X"][0][0] + 20, 650)) self.sleep(500) else: for i in out_cards: cars_pos = cards_dict[i][-1][0:2] # print("准备点击的牌:", cards_dict[i]) point = cars_pos[0] + 20, cars_pos[1] + 100 img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB) check_one = self.find_cards(img=img, pos=(cars_pos[0] - 2, 565, 60, 60), mark="m", confidence=0.8) # print("系统帮你点的牌:", check_one, "你要出的牌:", i) if check_one == i and check_one != "D" and check_one != "X": # print("腾讯自动帮你选牌:", check_one) img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) cv2.imwrite("debug.png", img) else: helper.LeftClick2(point) # print(point) self.sleep(100) remove_dict[i].append(cards_dict[i][-1]) cards_dict[i].remove(cards_dict[i][-1]) # print("remove_dict", remove_dict) img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) check_cards = 
self.find_cards(img, (180, 590, 1050, 90), mark="m") for i in out_cards: cards = cards.replace(i, "", 1) print("检查剩的牌: ", check_cards, "应该剩的牌: ", cards) if len(check_cards) < len(cards): for m in check_cards: cards = cards.replace(m, "", 1) print("系统多点的牌: ", cards) for n in cards: # print("字典里还剩的牌: ", cards_dict) cars_pos2 = cards_dict[n][-1][0:2] # print("准备点回来的牌:", cars_pos2) point2 = cars_pos2[0] + 20, cars_pos2[1] + 100 helper.LeftClick2(point2) self.sleep(100) remove_dict[n].append(cards_dict[n][-1]) cards_dict[n].remove(cards_dict[n][-1]) elif len(check_cards) > len(cards): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) cv2.imwrite("debug2.png", img) for m in cards: check_cards = check_cards.replace(m, "", 1) print("系统少点的牌: ", check_cards) for n in check_cards: # print("删除的字典: ", remove_dict) cars_pos3 = remove_dict[n][0][0:2] # print("准备再点出去的牌:", cars_pos3) point3 = cars_pos3[0] + 20, cars_pos3[1] + 100 helper.LeftClick2(point3) self.sleep(300) remove_dict[n].remove(remove_dict[n][0]) # print(remove_dict) self.sleep(200) '''def find_landlord(self, landlord_flag_pos): for pos in landlord_flag_pos: result = helper.LocateOnScreen("landlord", region=pos, confidence=0.6) if result is not None: return landlord_flag_pos.index(pos) self.sleep(200) print("没找到地主的位置")''' def find_landlord(self, landlord_flag_pos): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2HSV) for pos in landlord_flag_pos: classifier = DC.ColorClassify(debug=True) imgCut = img[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] result = classifier.classify(imgCut) for b in result: if b[0] == "Orange": if b[1] > 0.7: return landlord_flag_pos.index(pos) self.sleep(100) print("未找到地主位置") print("=============================") def before_start(self): global win_rate self.RunGame = True GameHelper.Interrupt = True HaveBid = False is_stolen = 0 in_game = helper.LocateOnScreen("chat", region=(1302, 744, 117, 56)) while self.RunGame and in_game is None: self.sleep(1000) print("还没进入到游戏中。。。") self.label.setText("未进入游戏") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') if self.loop_sign == 1: self.detect_start_btn() in_game = helper.LocateOnScreen("chat", region=(1302, 744, 117, 56)) self.detect_start_btn() self.sleep(300) print(in_game, "进入到游戏中") self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') while True: self.detect_start_btn() if not self.RunGame: break jiaodizhu_btn = helper.LocateOnScreen("jiaodizhu_btn", region=self.GeneralBtnPos) qiangdizhu_btn = helper.LocateOnScreen("qiangdizhu_btn", region=self.GeneralBtnPos) jiabei_btn = helper.LocateOnScreen("jiabei_btn", region=self.GeneralBtnPos) while jiaodizhu_btn is None and qiangdizhu_btn is None and jiabei_btn is None: self.detect_start_btn() if not self.RunGame: break print("等待叫地主、抢地主或加倍") self.sleep(200) jiaodizhu_btn = helper.LocateOnScreen("jiaodizhu_btn", region=self.GeneralBtnPos) qiangdizhu_btn = helper.LocateOnScreen("qiangdizhu_btn", region=self.GeneralBtnPos) jiabei_btn = helper.LocateOnScreen("jiabei_btn", region=self.GeneralBtnPos) print("《叫地主》, 《抢地主》, 《加倍》", jiaodizhu_btn, qiangdizhu_btn, jiabei_btn) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) win_rate = BidModel.predict(cards_str) if not HaveBid: 
with open("cardslog.txt", "a") as f: f.write(str(int(time.time())) + " " + cards_str + " " + str(round(win_rate, 2)) + "\n") print("叫牌预估胜率:", win_rate) self.BidWinrate.setText("叫牌胜率:" + str(round(win_rate, 2)) + "%") if jiaodizhu_btn is not None: print("找到《叫地主》按钮", jiaodizhu_btn) HaveBid = True print(win_rate, self.BidThreshold1) if win_rate > self.BidThreshold1: helper.ClickOnImage("jiaodizhu_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("bujiao_btn", region=self.GeneralBtnPos) self.sleep(500) if qiangdizhu_btn is not None: print("找到《抢地主》按钮", qiangdizhu_btn) HaveBid = True if win_rate > self.BidThreshold2: is_stolen = 1 helper.ClickOnImage("qiangdizhu_btn", region=self.GeneralBtnPos) else: print("点《不抢》") helper.ClickOnImage("buqiang_btn", region=self.GeneralBtnPos) self.sleep(500) if jiabei_btn is not None: self.sleep(500) break self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') laotou = helper.LocateOnScreen("laotou", region=(761, 45, 255, 100)) while laotou is not None: self.detect_start_btn() if not self.RunGame: break self.sleep(200) print("在游戏里,还在抢地主。。。。") self.label.setText("在抢地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') print("底牌现身。。。") self.label.setText("抢完地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') self.sleep(200) llcards = self.find_landlord_cards() while len(llcards) != 3: self.detect_start_btn() if not self.RunGame: break if len(llcards) > 3: self.ThreeLandlordCardsConfidence += 0.05 time.sleep(200) elif len(llcards) < 3: self.ThreeLandlordCardsConfidence -= 0.05 time.sleep(200) llcards = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + llcards) print("地主牌:", llcards) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) if len(cards_str) == 20: win_rate = LandlordModel.predict(cards_str) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") print("预估地主胜率:", win_rate) else: user_position_code = self.find_landlord(self.LandlordFlagPos) print(user_position_code) while user_position_code is None: self.detect_start_btn() if not self.RunGame: break user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) user_position = ['up', 'landlord', 'down'][user_position_code] win_rate = FarmerModel.predict(cards_str, llcards, user_position) - 5 print("预估农民胜率:", win_rate) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") self.sleep(500) if win_rate > self.JiabeiThreshold[is_stolen][0]: chaojijiabei_btn = helper.LocateOnScreen("chaojijiabei_btn", region=self.GeneralBtnPos) if chaojijiabei_btn is not None: helper.ClickOnImage("chaojijiabei_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) elif win_rate > self.JiabeiThreshold[is_stolen][1]: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) else: helper.ClickOnImage("bujiabei_btn", region=self.GeneralBtnPos) self.sleep(500) if win_rate > self.MingpaiThreshold: self.sleep(1000) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) while mingpai_btn is None: print('没找到《明牌》按钮') self.sleep(200) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) helper.ClickOnImage("mingpai_btn", 
region=self.GeneralBtnPos) self.sleep(500) print("加倍环节已结束") def animation(self, cards):
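Note: the click_cards routine above reconciles what is actually selected on screen with what should remain in hand after the intended play, clicking back any card the client auto-selected and clicking out any card it missed. The following is a minimal, self-contained sketch of that diff step; diff_selection is an illustrative name (the original clicks screen coordinates instead of returning lists), and a one-character-per-card hand string is assumed.

# Illustrative reconstruction of the selection check in click_cards above.
# diff_selection is a made-up helper name, not part of the original code.
def diff_selection(detected_remaining, expected_remaining):
    """Return (to_click_back, to_click_out).

    detected_remaining: cards still recognised in the hand row after clicking.
    expected_remaining: the hand with the intended play already removed.
    """
    to_click_back = list(expected_remaining)   # over-selected: should remain but vanished from the row
    for c in detected_remaining:
        if c in to_click_back:
            to_click_back.remove(c)
    to_click_out = list(detected_remaining)    # under-selected: should have left the row but stayed
    for c in expected_remaining:
        if c in to_click_out:
            to_click_out.remove(c)
    return to_click_back, to_click_out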
next_line: move_type = get_move_type(self.real_to_env(cards))
gold_snippet_index: 1
created_at: 2023-12-01 04:04:30+00:00
level: 24k
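Note: the record above builds the DouZero-style deal by rotating the three seat labels around the player's position code and then validating the 3/17/17/20 card split before play starts. Below is a minimal sketch of that seat-rotation and validation logic; assign_hands and validate_deal are illustrative names, not taken from the source.

# Minimal sketch of the seat assignment used when card_play_data_list is built.
# Helper names here are illustrative, not from the original record.
SEATS = ['landlord_up', 'landlord', 'landlord_down']

def assign_hands(user_position_code, user_hand, other_cards, three_landlord_cards):
    """Map the player's hand and the two hidden hands onto the three seats.

    other_cards holds everything not in the player's hand; the 20-card landlord
    hand sits in other_cards[17:] whenever the player is not the landlord.
    """
    deal = {'three_landlord_cards': three_landlord_cards,
            SEATS[user_position_code % 3]: user_hand}
    nxt = (user_position_code + 1) % 3    # seat after the player
    prev = (user_position_code + 2) % 3   # seat before the player
    deal[SEATS[nxt]] = other_cards[:17] if nxt != 1 else other_cards[17:]
    deal[SEATS[prev]] = other_cards[:17] if nxt == 1 else other_cards[17:]
    return deal

def validate_deal(deal):
    """Mirror the sanity checks above: 3 bottom cards and 17/17/20 hands."""
    return (len(deal['three_landlord_cards']) == 3
            and len(deal['landlord_up']) == 17
            and len(deal['landlord_down']) == 17
            and len(deal['landlord']) == 20)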
repo_name: super1207/satoricq
file_path: satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://www.kookapp.cn/api/v3\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 0\n self._self_id = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway/index?compress=0\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"s\": 2,\"sn\": self._sn}))\n continue\n js = json.loads(reply)\n s = js[\"s\"]\n if s == 5:raise Exception(\"recv reset ws\")\n elif s == 3:pass # heartbeat\n elif s == 1:\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n print(\"kook:ws连接成功\")\n elif s == 0:\n self._sn = js[\"sn\"]\n asyncio.create_task(self._event_deal(js[\"d\"]))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n asyncio.create_task(self._ws_server())\n\n def _kook_msg_to_satori(self,msg_type:int,message:str)->str:\n ret = \"\"\n if msg_type == 2: #图片\n ret += \"<img src={}/>\".format(json.dumps(message))\n else:\n def kook_msg_f(msg):\n ret = \"\"\n is_f = False\n for ch in msg:\n if is_f:\n is_f = False\n ret += ch\n elif ch == \"\\\\\":\n is_f = True\n else:\n ret += ch\n return ret\n \n index = 0\n msg_list = message.split(\"(met)\")\n for it in msg_list:\n if index % 2 == 0:\n ret += satori_to_plain(kook_msg_f(it))\n else:\n if it == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id=\\\"{}\\\"/>\".format(it)\n index += 1\n return ret\n\n\n async def _deal_group_message_event(self,data,user_id:str):\n group_id = data[\"target_id\"]\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n platform=\"kook\",\n channel=SatoriChannel(\n id=\"GROUP_\"+group_id,\n type=SatoriChannel.ChannelType.TEXT,\n 
name=extra[\"channel_name\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=author[\"id\"],\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=author[\"nickname\"],\n avatar=author[\"avatar\"]\n ),\n guild=SatoriGuild(\n id=extra[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(author[\"roles\"]))\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_private_message_event(self,data,user_id:str):\n\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriPrivateMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n channel=SatoriChannel(\n id=user_id,\n type=SatoriChannel.ChannelType.TEXT,\n name=author[\"username\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=user_id,\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n platform=\"kook\"\n ).to_dict()\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _deal_group_increase_event(self,data):\n extra = data[\"extra\"]\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"kook\",\n \"self_id\":self._self_id,\n \"timestamp\":data[\"msg_timestamp\"],\n \"guild\":SatoriGuild(id=data[\"target_id\"]).to_dict(),\n \"member\":SatoriGuildMember(joined_at=extra[\"body\"][\"joined_at\"]).to_dict(),\n \"user\":SatoriUser(id=extra[\"body\"][\"user_id\"]).to_dict()\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n\n\n async def _deal_group_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id == \"1\": # system message\n tp = data[\"type\"]\n if tp != 255:\n return\n sub_type = data[\"extra\"][\"type\"]\n if sub_type == \"joined_guild\":\n await self._deal_group_increase_event(data)\n else:\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_group_message_event(data,user_id)\n\n\n async def _deal_person_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id != 1: # 不是系统消息\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_private_message_event(data,user_id)\n\n\n async def _event_deal(self,data:dict):\n try:\n tp = data[\"channel_type\"]\n if tp == \"GROUP\":\n await self._deal_group_evt(data)\n else:\n await self._deal_person_evt(data)\n except:\n print(traceback.format_exc())\n \n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n async with httpx.AsyncClient() as client:\n return (await client.post(url,headers=headers,data=data)).json()[\"data\"]\n\n def _make_kook_text(self,text):\n ret = \"\"\n for ch in text:\n if ch in [\"\\\\\",\"*\",\"~\",\"[\",\"(\",\")\",\"]\",\"-\",\">\",\"`\"]:\n ret += \"\\\\\"\n ret += ch\n return ret\n \n async def _satori_to_kook(self,satori_obj) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_kook_text(node)\n if last_type == 1 and 
len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"(met)all(met)\"\n elif id != None:\n text = \"(met){}(met)\".format(self._make_kook_text(id))\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n kook_img_url = \"\"\n if img_url.startswith(\"https://img.kookapp.cn\"):\n kook_img_url = img_url\n else:\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n files = {\n 'file':('test',img_content)\n }\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n async with httpx.AsyncClient() as client:\n ret = (await client.post(self._http_url + \"/asset/create\",files=files,headers=headers)).json()\n kook_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"content\":kook_img_url\n })\n last_type = 2\n return to_send_data\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_kook(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n channel_id = int(channel_id[6:])\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n else:\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/direct-message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await self._api_call(\"/user/me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"kook\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = \"/user/view?user_id={}&guild_id={}\".format(user_id,guild_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"id\"],\n name=get_json_or(obret,\"username\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=get_json_or(obret,\"bot\",None)\n ),\n nick=get_json_or(obret,\"nickname\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n joined_at=get_json_or(obret,\"join_time\",None)\n ).to_dict()\n return satori_ret\n \n async def get_user(self,platform:Optional[str],self_id:Optional[str],user_id:str) -> 
[dict]:\n '''获取用户信息'''\n url = \"/user/view?user_id={}\".format(user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=obret[\"bot\"],\n ).to_dict()\n return satori_ret\n \n async def get_channel_list(self,platform:Optional[str],self_id:Optional[str],guild_id:str) -> [dict]:\n '''获取频道列表'''\n url = \"/channel/list?guild_id={}\".format(guild_id)\n obret = (await self._api_call(url))\n ret_list = []\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent_id=channel_parent\n ).to_dict())\n page_total = get_json_or(obret,\"data\",1)\n if page_total > 1:\n for i in range(2,page_total + 1):\n url = \"/channel/list?guild_id={}&page={}\".format(guild_id,i)\n obret = (await self._api_call(url))\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent=channel_parent\n ).to_dict())\n return {\"data\":ret_list}" }, { "identifier": "AdapterMihoyo", "path": "mihoyo_adapter.py", "snippet": "class AdapterMihoyo:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = \"https://bbs-api.miyoushe.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 1\n self._self_id = config[\"bot_id\"]\n self._secret = config[\"secret\"]\n self._villa_id = config[\"villa_id\"]\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def _send_ws_pack(self,ws,ws_dat,biztype):\n magic = 0xBABEFACE.to_bytes(length=4, byteorder='little', signed=False)\n if biztype == 7:\n pb_pack = bytes(PLogin(\n uid=int(ws_dat[\"uid\"]),\n token=self._villa_id + \".\" + self._secret + \".\" + self._self_id,\n platform=ws_dat[\"platform\"],\n app_id=ws_dat[\"app_id\"],\n device_id=ws_dat[\"device_id\"]\n ))\n elif biztype == 6:\n pb_pack = bytes(PHeartBeat(\n client_timestamp=str(int(round(time.time() * 1000)))\n ))\n else:\n raise Exception(\"unkonw biztype:{}\".format(biztype))\n \n wid = self._sn\n self._sn += 1\n\n flag = 1\n appid = 104\n headerlen = 24\n datalen = headerlen + len(pb_pack)\n\n to_send = magic\n to_send += datalen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += headerlen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += wid.to_bytes(length=8, byteorder='little', signed=False)\n to_send += flag.to_bytes(length=4, byteorder='little', 
signed=False)\n to_send += biztype.to_bytes(length=4, byteorder='little', signed=False)\n to_send += appid.to_bytes(length=4, byteorder='little', signed=True)\n to_send += pb_pack\n\n await ws.send(to_send)\n \n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_dat = (await self._api_call(\"/vila/api/bot/platform/getWebsocketInfo\"))\n # print(ws_dat)\n ws_url = ws_dat[\"websocket_url\"]\n async with connect(ws_url) as websocket:\n await self._send_ws_pack(websocket,ws_dat,biztype=7)\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await self._send_ws_pack(websocket,ws_dat,biztype=6)\n continue\n biztype = int.from_bytes(reply[24:28],byteorder='little',signed=False)\n if biztype == 7: # 登录返回\n login_reply = PLoginReply().parse(reply[32:])\n if login_reply.code == 0:\n print(\"mihoyo:ws连接成功\")\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n continue\n else:\n print(\"mihoyo:ws连接失败\",login_reply.to_json())\n break\n elif biztype == 53:\n print(\"mihoyo:ws被踢下线\")\n pkoff = PKickOff().parse(reply[32:])\n print(\"mihoyo:\" + pkoff.reason)\n break\n elif biztype == 52:\n print(\"mihoyo:ws服务关机\")\n break\n elif biztype == 6:\n heart_reply = PHeartBeatReply().parse(reply[32:])\n if heart_reply.code != 0:\n print(\"mihoyo:ws心跳失败\")\n break\n elif biztype == 30001: # 正常处理\n evt = RobotEvent().parse(reply[32:]).to_dict()\n asyncio.create_task(self._event_deal(evt))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n traceback.print_exc()\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n asyncio.create_task(self._ws_server())\n\n def _mihoyo_msg_to_satori(self,content_obj)->str:\n ret = \"\"\n entities = content_obj[\"content\"][\"entities\"]\n text = content_obj[\"content\"][\"text\"]\n l = len(text)\n i = 0\n while i < l:\n for en in entities:\n if en[\"offset\"] == i:\n print(en)\n i += en[\"length\"]\n if en[\"entity\"][\"type\"] == \"mention_all\": # 实际上收不到\n ret += \"<at type=\\\"all\\\"/>\"\n elif en[\"entity\"][\"type\"] == \"mentioned_robot\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"bot_id\"])\n elif en[\"entity\"][\"type\"] == \"mentioned_user\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"user_id\"])\n break\n else:\n ret += satori_to_plain(text[i])\n i += 1\n return ret\n async def _deal_group_message_event(self,data):\n extendData = data[\"extendData\"]\n\n sendMessage = extendData[\"sendMessage\"]\n user_id = sendMessage[\"fromUserId\"]\n villaId = sendMessage[\"villaId\"]\n roomId = sendMessage[\"roomId\"]\n\n villaRoomId = villaId + \"_\" + roomId\n\n content_obj = json.loads(sendMessage[\"content\"])\n\n extra_obj = json.loads(content_obj[\"user\"][\"extra\"])\n\n satori_msg = self._mihoyo_msg_to_satori(content_obj) # todo\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(data[\"sendAt\"]) * 1000,\n platform=\"mihoyo\",\n channel=SatoriChannel(\n id=villaRoomId,\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n 
created_at=int(sendMessage[\"sendAt\"])\n ),\n user=SatoriUser(\n id=user_id,\n name=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n member=SatoriGuildMember(\n nick=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n guild=SatoriGuild(\n id=villaId\n ),\n role=SatoriGuildRole(\n id=extra_obj[\"member_roles\"][\"name\"],\n name=extra_obj[\"member_roles\"][\"name\"]\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _event_deal(self,data:dict):\n try:\n event_type = data[\"type\"]\n if event_type == \"SendMessage\":\n await self._deal_group_message_event(data)\n except:\n print(traceback.format_exc())\n\n \n async def _api_call(self,path,data = None,villa_id = 0) -> dict:\n url:str = self._http_url + path\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret}\n if villa_id == 0:\n headers[\"x-rpc-bot_villa_id\"] = self._villa_id\n else:\n headers[\"x-rpc-bot_villa_id\"] = villa_id\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n headers[\"Content-Type\"] = \"application/json\"\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,data=data)).json()\n if ret[\"retcode\"] != 0:\n print(\"mihoyo:\",ret)\n return ret[\"data\"]\n\n \n async def _satori_to_mihoyo(self,satori_obj,villa_id) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = node\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"text\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"text\":text,\n \"entities\":[]\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"@全体成员\"\n elif id != None:\n text = \"@\" + id\n else:\n continue\n\n if last_type != 1 or len(to_send_data) == 0:\n to_send_data.append({\n \"type\":1,\n \"text\":\"\",\n \"entities\":[]\n })\n last_type = 1\n\n l = len(to_send_data)\n ll = len(to_send_data[l - 1][\"text\"])\n to_send_data[l - 1][\"text\"] += text\n if type == \"all\":\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mention_all\"\n },\n \"length\":5,\n \"offset\":ll\n })\n else:\n if id.startswith(\"bot_\"):\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_robot\",\n \"bot_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n else:\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_user\",\n \"user_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n mihoyo_img_url = \"\"\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n ext = imghdr.what(file = \"\",h=img_content)\n m = hashlib.md5()\n m.update(img_content)\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":villa_id}\n upload_info_url = self._http_url + \"/vila/api/bot/platform/getUploadImageParams\"\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",upload_info_url,json={\n 
\"md5\":m.hexdigest(),\n \"ext\":ext\n },headers=headers)\n file_params = (await client.send(req)).json()[\"data\"][\"params\"]\n files = {\n \"x:extra\":file_params[\"callback_var\"][\"x:extra\"],\n \"OSSAccessKeyId\":file_params[\"accessid\"],\n \"signature\":file_params[\"signature\"],\n \"success_action_status\":file_params[\"success_action_status\"],\n \"name\":file_params[\"name\"],\n \"callback\":file_params[\"callback\"],\n \"x-oss-content-type\":file_params[\"x_oss_content_type\"],\n \"key\":file_params[\"key\"],\n \"policy\":file_params[\"policy\"],\n \"Content-Disposition\":file_params[\"content_disposition\"],\n 'file':('test',img_content)\n }\n async with httpx.AsyncClient() as client:\n ret = (await client.post(file_params[\"host\"],files=files)).json()\n mihoyo_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"url\":mihoyo_img_url,\n })\n last_type = 2\n to_send_data2 = []\n for it in to_send_data:\n type = it[\"type\"]\n if type == 1:\n to_send_data2.append({\n \"object_name\":\"MHY:Text\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"text\":it[\"text\"],\n \"entities\":it[\"entities\"]\n }\n })})\n elif type == 2:\n to_send_data2.append({\n \"object_name\":\"MHY:Image\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"url\":it[\"url\"]\n }\n \n })})\n \n return to_send_data2\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n villa_id = channel_id.split(\"_\")[0]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_mihoyo(satori_obj,villa_id)\n to_ret = []\n # print(to_sends)\n for it in to_sends:\n it[\"room_id\"] = channel_id.split(\"_\")[1]\n ret = await self._api_call(\"/vila/api/bot/platform/sendMessage\",json.dumps(it),villa_id=villa_id)\n to_ret.append(SatoriMessage(id=ret[\"bot_msg_id\"],content=\"\").to_dict())\n return to_ret\n \n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._self_id,\n is_bot=True\n ),\n self_id=self._self_id,\n platform=\"mihoyo\"\n ).to_dict()\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n\n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = self._http_url + \"/vila/api/bot/platform/getMember\"\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":guild_id}\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",url,json={\n \"uid\":user_id\n },headers=headers)\n obret = (await client.send(req)).json()[\"data\"][\"member\"]\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"basic\"][\"uid\"],\n name=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n is_bot=False\n ),\n nick=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n joined_at=int(obret[\"joined_at\"] + \"000\")\n ).to_dict()\n return satori_ret" }, { "identifier": "AdapterOnebot", "path": "onebot_adapter.py", "snippet": "class AdapterOnebot:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = config[\"http_url\"]\n self._ws_url = config[\"ws_url\"]\n if \"access_token\" in config:\n self._access_token = config[\"access_token\"]\n else:\n self._access_token = 
None\n self._is_stop = False\n self._login_status = 3 # DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n\n def _cqarr_to_satori(self,cqarr):\n ret = \"\"\n for node in cqarr:\n if node[\"type\"] == \"text\":\n ret += satori_to_plain(node[\"data\"][\"text\"])\n elif node[\"type\"] == \"at\":\n qq = node[\"data\"][\"qq\"]\n if qq == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id={}/>\".format(json.dumps(qq))\n elif node[\"type\"] == \"image\":\n url = node[\"data\"][\"url\"]\n ret += \"<img src={}/>\".format(json.dumps(url))\n return ret\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n async def _ws_server(self:AdapterOnebot) -> None:\n while not self._is_stop:\n try:\n self._login_status = 2 # CONNECT\n async with connect(self._ws_url) as websocket:\n print(\"onebot:ws已经连接\")\n self._login_status = 1 # ONLINE\n try:\n while True:\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n await self._event_deal(json.loads(reply))\n except asyncio.TimeoutError:\n if self._is_stop:\n await websocket.close()\n except asyncio.QueueFull:\n print(\"队列满\")\n except Exception as e:\n print(e) \n except Exception as e:\n print(e)\n print(\"onebot:ws连接已经断开\")\n self._login_status = 3 # DISCONNECT\n asyncio.create_task(_ws_server(self))\n \n async def _event_deal(self,evt:dict):\n '''自己定义的事件转化函数'''\n post_type = evt[\"post_type\"]\n if post_type == \"message\":\n message_type = evt[\"message_type\"]\n sender = evt[\"sender\"]\n if message_type == \"group\":\n channel_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"type\":0,\n \"name\":None,\n \"parent_id\":None\n }\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n member_obj = {\n \"nick\":get_json_or(sender,\"card\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"joined_at\":joined_at\n }\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n role_obj = {\n \"id\":get_json_or(sender, \"role\",\"member\"),\n \"name\":get_json_or(sender,\"role\",\"member\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"message\":message_obj,\n \"role\":role_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif message_type == \"private\":\n channel_obj = {\n 
\"id\":str(evt[\"user_id\"]),\n \"type\":1,\n \"name\":None,\n \"parent_id\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"message\":message_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif post_type == \"notice\":\n notice_type = evt[\"notice_type\"]\n if notice_type == \"group_increase\":\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n member_obj = {\n \"nick\":None,\n \"avatar\":get_json_or(evt,\"avatar\",None),\n \"joined_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":None,\n \"nick\":None,\n \"avatar\":None,\n \"is_bot\":None\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _api_call(self,path,data) -> dict:\n url:str = self._http_url + path\n if self._access_token:\n headers = {\"Authorization\":\"Bearer {}\".format(self._access_token)}\n else:\n headers = {}\n async with httpx.AsyncClient() as client:\n # headers[\"Content-Type\"] = \"application/json\"\n return (await client.post(url,headers=headers,data=data)).json()\n \n async def _satori_to_cq(self,satori_obj) -> str:\n ret = \"\"\n for node in satori_obj:\n if isinstance(node,str):\n ret += _cq_text_encode(node)\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n ret += \"[CQ:at,qq=all]\"\n elif id != None:\n ret += \"[CQ:at,qq={}]\".format(_cq_params_encode(id))\n elif node[\"type\"] == \"img\":\n img_url = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_url = \"base64://\" + img_url[base64_start + 7:]\n ret += \"[CQ:image,file={}]\".format(_cq_params_encode(img_url)) \n\n return ret\n\n\n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_send = await self._satori_to_cq(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n group_id = int(channel_id[6:])\n ret = await self._api_call(\"/send_group_msg\",{\"group_id\":group_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n else:\n user_id = int(channel_id)\n ret = await self._api_call(\"/send_private_msg\",{\"user_id\":user_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await 
self._api_call(\"/get_login_info\",{}))[\"data\"]\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":obret[\"nickname\"],\n \"nick\":obret[\"nickname\"],\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"self_id\":str(obret[\"user_id\"]),\n \"platform\":\"onebot\",\n \"status\":self._login_status,\n }\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n obret = (await self._api_call(\"/get_group_member_info\",{\n \"group_id\":int(guild_id[6:]),\n \"user_id\":int(user_id)\n }))[\"data\"]\n joined_at = get_json_or(obret,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":get_json_or(obret,\"nickname\",None),\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"joined_at\":joined_at,\n }\n return satori_ret" }, { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(self) -> None:\n self.botlist:list = []\n self.web_port:int = 8080\n self.web_host:str = \"127.0.0.1\"\n self.access_token:str = \"\"\n \n async def read_config(self):\n async with aiofiles.open('config.json', mode='r') as f:\n json_dat = json5.loads(await f.read())\n self.botlist = json_dat[\"botlist\"]\n self.web_port = json_dat[\"web_port\"]\n self.web_host = json_dat[\"web_host\"]\n self.access_token = json_dat[\"access_token\"]" }, { "identifier": "AdapterQQ", "path": "qq_adapter.py", "snippet": "class AdapterQQ:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._botqq = config[\"botqq\"]\n self._appid = config[\"appid\"]\n self._token = config[\"token\"]\n if \"withgroup\" in config:\n self._withgroup = config[\"withgroup\"]\n else:\n self._withgroup = None\n self._appsecret = config[\"appsecret\"]\n self._http_url = \"https://api.sgroup.qq.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = None\n self._self_id = None\n self._access_token = None\n self._expires_in = 0\n self.msgid_map = dict()\n # self._self_name = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = 
time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"op\": 1,\"d\": self._sn}))\n continue\n js = json.loads(reply)\n op = js[\"op\"]\n if op == 0: # 事件\n self._sn = js[\"s\"]\n t = js[\"t\"]\n if t == \"READY\":\n print(\"qq:ws连接成功\")\n print(json.dumps(js))\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n else:\n print(json.dumps(js))\n asyncio.create_task(self._deal_event(js))\n elif op == 1: # 心跳\n await websocket.send(json.dumps({\"op\":11}))\n elif op == 7: # 重连\n print(\"qq:服务端要求重连\")\n break\n elif op == 9: # 参数错误\n print(\"qq:参数错误:\",json.dumps(js))\n break\n elif op == 10: # ws建立成功\n if self._withgroup:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30) | (1 << 25),\n \"shard\":[0, 1],\n }\n }))\n else:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30),\n \"shard\":[0, 1],\n }\n }))\n elif op == 11: # HTTP Callback ACK\n pass\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def _token_refresh(self):\n async with httpx.AsyncClient() as client:\n if not self._expires_in or int(self._expires_in) < 60 * 5:\n url = \"https://bots.qq.com/app/getAppAccessToken\"\n ret = (await client.post(url,json={\n \"appId\":self._appid,\n \"clientSecret\":self._appsecret\n })).json()\n self._access_token = ret[\"access_token\"]\n self._expires_in = ret[\"expires_in\"]\n # print(ret)\n\n async def _qqarr_to_satori(self,qqmsg_arr):\n ret = \"\"\n for it in qqmsg_arr:\n if it[\"type\"] == \"text\":\n ret += satori_to_plain(it[\"data\"])\n else:\n if it[\"data\"].startswith(\"<@!\"):\n user_id = it[\"data\"][3:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n elif it[\"data\"].startswith(\"<@\"):\n user_id = it[\"data\"][2:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n return ret\n \n async def _deal_channel_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"CHANNEL_\"+data[\"channel_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_guild\",\n channel=SatoriChannel(\n id=\"CHANNEL_\"+data[\"channel_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"],\n name=data[\"author\"][\"username\"],\n avatar=data[\"author\"][\"avatar\"],\n is_bot=data[\"author\"][\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=data[\"member\"][\"nick\"],\n avatar=data[\"author\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(data[\"member\"][\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n guild=SatoriGuild(\n id=data[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(data[\"member\"][\"roles\"]))\n )\n )\n 
self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_group_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"GROUP_\"+data[\"group_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._botqq,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_group\",\n channel=SatoriChannel(\n id=\"GROUP_\"+data[\"group_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"]\n ),\n member=SatoriGuildMember(\n ),\n guild=SatoriGuild(\n id=\"GROUP_\"+data[\"group_id\"]\n ),\n role=SatoriGuildRole(\n id=\"unkonw\",\n name=\"unkonw\"\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_event(self,event):\n try:\n type = event[\"t\"]\n if type == \"AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"channel_id\" in d) and d[\"channel_id\"]:\n await self._deal_channel_event(d)\n else:\n if type == \"GROUP_AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"group_id\" in d) and d[\"group_id\"]:\n await self._deal_group_event(d)\n except:\n print(traceback.format_exc())\n\n async def _token_refresh_task(self):\n while True:\n try:\n await self._token_refresh()\n index = 0\n while index < 60: # 每60秒检测一次token是否过期\n await asyncio.sleep(1)\n if self._is_stop:\n break\n index += 1\n if self._is_stop:break\n except:\n print(traceback.format_exc())\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n try:\n await self._token_refresh()\n except:\n print(traceback.format_exc())\n asyncio.create_task(self._token_refresh_task())\n asyncio.create_task(self._ws_server())\n\n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()\n else:\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,json=data))\n # print(ret.content)\n return ret.json()\n\n def _make_qq_text(self,text:str):\n ret = text\n ret = ret.replace(\"&\",\"&amp;\")\n ret = ret.replace(\"<\",\"&lt;\")\n ret = ret.replace(\">\",\"&gt;\")\n return ret\n \n async def _satori_to_qq(self,satori_obj,platform = \"qq_guild\") -> [dict]:\n to_reply_id = None\n ret_text = \"\"\n ret_img = []\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_qq_text(node)\n ret_text += text\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n # 注意,机器人不支持at all,不能发,也不能收,这里假装at all了\n ret_text += \"@全体成员\"\n # text = \"<@everyone>\"\n elif id != None:\n ret_text += \"<@{}>\".format(self._make_qq_text(id))\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n ret_img.append(img_content)\n else:\n if platform == \"qq_guild\":\n async with httpx.AsyncClient() as client:\n img_content = 
(await client.get(img_url)).content\n ret_img.append(img_content)\n else:\n ret_img.append(img_url)\n elif node[\"type\"] == \"passive\":\n to_reply_id = node[\"attrs\"][\"id\"]\n \n ret_vec = []\n ret_vec.append({\n \"content\":ret_text,\n \"file_image\":None,\n \"to_reply_id\":to_reply_id\n })\n if len(ret_img) != 0:\n ret_vec[0][\"file_image\"] = ret_img[0]\n for img in ret_img[1:]:\n ret_vec.append({\n \"content\":\"\",\n \"file_image\":img,\n \"to_reply_id\":to_reply_id\n })\n return ret_vec\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n to_reply_id = self.msgid_map[channel_id]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_qq(satori_obj,platform)\n # print(to_sends)\n if channel_id.startswith(\"CHANNEL_\") and platform == \"qq_guild\":\n channel_id = channel_id[8:]\n to_ret = []\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/channels/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"]\n }\n if it[\"file_image\"]:\n ret = (await client.post(url,headers=headers,data=data,files={\"file_image\":it[\"file_image\"]})).json()\n else:\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"id\"],content=\"\").to_dict())\n return to_ret\n elif channel_id.startswith(\"GROUP_\") and platform == \"qq_group\":\n channel_id = channel_id[6:]\n to_ret = []\n msg_seq = 1\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/v2/groups/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"],\n \"msg_type\":0,\n \"msg_seq\":msg_seq,\n # \"image\": 目前暂不支持\n }\n msg_seq += 1\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n\n if platform == \"qq_group\":\n return SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()\n else: \n obret = (await self._api_call(\"/users/@me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"qq_guild\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == \"qq_guild\":\n return satori_ret\n elif platform == None:\n if not self._withgroup:\n return [satori_ret]\n else:\n return [satori_ret,SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()]\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n if platform == \"qq_guild\":\n url = 
\"/guilds/{}/members/{}\".format(guild_id,user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"user\"][\"id\"],\n name=obret[\"user\"][\"username\"],\n avatar=obret[\"user\"][\"avatar\"],\n is_bot=obret[\"user\"][\"bot\"]\n ),\n nick=get_json_or(obret,\"nick\",None),\n avatar=obret[\"user\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(obret[\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ).to_dict()\n return satori_ret" }, { "identifier": "remove_json_null", "path": "tool.py", "snippet": "def remove_json_null(js) -> dict:\n '''将json中的None字段删除'''\n if isinstance(js,dict):\n st = {}\n for key in js:\n if js[key] != None:\n st[key] = remove_json_null(js[key])\n return st\n elif isinstance(js,list):\n lst = []\n for it in js:\n lst.append(remove_json_null(it))\n return lst\n else:\n return js" } ]
import asyncio
import aiohttp
import json
import uuid
from kook_adapter import AdapterKook
from mihoyo_adapter import AdapterMihoyo
from onebot_adapter import AdapterOnebot
from config import Config
from aiohttp import web
from qq_adapter import AdapterQQ
from tool import remove_json_null
16,628
class Satori:
    def __init__(self) -> None:
        self._config:Config = Config()
        self.adapterlist = []
        self.wsmap = {}
        self._evt_id = 100

    async def _get_adapter(self,platform,self_id):
        ''' Get the adapter for the given platform and self_id '''
        for adapter in self.adapterlist:
            info = adapter["info"]
            for bot in info:
                if self_id == bot["self_id"] and bot["platform"] == platform:
                    return adapter["adapter"]
        return None

    async def ws_send_json(ws,js) -> None:
class Satori:
    def __init__(self) -> None:
        self._config:Config = Config()
        self.adapterlist = []
        self.wsmap = {}
        self._evt_id = 100

    async def _get_adapter(self,platform,self_id):
        ''' Get the adapter for the given platform and self_id '''
        for adapter in self.adapterlist:
            info = adapter["info"]
            for bot in info:
                if self_id == bot["self_id"] and bot["platform"] == platform:
                    return adapter["adapter"]
        return None

    async def ws_send_json(ws,js) -> None:
js = remove_json_null(js)
5
2023-12-03 13:53:47+00:00
24k
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_dir):\ndef _clone_command_for_github(git_config, dest_dir):\ndef _clone_command_for_ssh(git_config, dest_dir):\ndef _clone_command_for_github_https(git_config, dest_dir):\ndef _clone_command_for_codeup_https(git_config, dest_dir):\ndef _clone_command(repo_url, dest_dir, branch=None):\ndef _update_url_with_token(repo_url, token):\ndef _update_url_with_username_and_password(repo_url, username, password):\ndef _checkout_commit(git_config, dest_dir):" }, { "identifier": "INSTANCE_TYPE_LOCAL_GPU", "path": "pai/common/consts.py", "snippet": "INSTANCE_TYPE_LOCAL_GPU = \"local_gpu\"" }, { "identifier": "ModelFormat", "path": "pai/common/consts.py", "snippet": "class ModelFormat(object):\n SavedModel = \"SavedModel\"\n FrozenPb = \"FrozenPb\"\n KerasH5 = \"KerasH5\"\n CaffePrototxt = \"Caffe\"\n ONNX = \"ONNX\"\n BladeModel = \"BladeModel\"\n PMML = \"PMML\"\n TorchScript = \"TorchScript\"\n TFLite = \"TFLite\"" }, { "identifier": "ContainerRun", "path": "pai/common/docker_utils.py", "snippet": "class ContainerRun(object):\n \"\"\"A class represent a container run in local.\"\"\"\n\n CONTAINER_STATUS_RUNNING = \"running\"\n CONTAINER_STATUS_EXITED = \"exited\"\n CONTAINER_STATUS_PAUSED = \"paused\"\n\n def __init__(self, container, port: Optional[int] = None):\n \"\"\"Initialize a container run.\n\n Args:\n container: A docker container object.\n port (int): The host port that container is exposed to.\n\n \"\"\"\n self.container = container\n self.port = port\n\n @property\n def status(self):\n self.container.reload()\n return self.container.status\n\n def is_running(self):\n \"\"\"Return True if container is running, otherwise False.\"\"\"\n return self.status == self.CONTAINER_STATUS_RUNNING\n\n def is_terminated(self):\n \"\"\"Return True if container is terminated, otherwise False.\"\"\"\n return self.status in [\n self.CONTAINER_STATUS_EXITED,\n self.CONTAINER_STATUS_PAUSED,\n ]\n\n def is_succeeded(self):\n \"\"\"Return True if container is succeeded, otherwise False.\"\"\"\n return (\n self.status == \"exited\" and self.container.attrs[\"State\"][\"ExitCode\"] == 0\n )\n\n def wait_for_ready(self, interval=5):\n \"\"\"Wait until container enter running state or terminated state.\"\"\"\n while True:\n status = self.status\n if status == self.CONTAINER_STATUS_RUNNING:\n break\n elif status in [self.CONTAINER_STATUS_EXITED, self.CONTAINER_STATUS_PAUSED]:\n raise RuntimeError(\n \"Container is terminated : id={} status={}\".format(\n self.container.id, self.container.status\n )\n )\n time.sleep(interval)\n\n def stop(self):\n if self.is_running():\n self.container.stop()\n\n def start(self):\n if not self.is_running():\n self.container.start()\n\n def delete(self):\n if self.is_running():\n self.container.stop()\n self.container.remove()\n\n def watch(self, show_logs: bool = True):\n \"\"\"Watch container log and wait for container to exit.\"\"\"\n if not show_logs:\n self.container.wait()\n else:\n log_iter = self.container.logs(\n stream=True,\n follow=True,\n )\n for log in log_iter:\n print(log.decode())\n\n self.container.reload()\n exit_code = self.container.attrs[\"State\"][\"ExitCode\"]\n if exit_code != 0:\n raise RuntimeError(\n \"Container run exited failed: 
exit_code={}\".format(exit_code)\n )" }, { "identifier": "run_container", "path": "pai/common/docker_utils.py", "snippet": "def run_container(\n image_uri: str,\n container_name: Optional[str] = None,\n port: Optional[int] = None,\n environment_variables: Optional[Dict[str, str]] = None,\n command: Optional[Union[List[str], str]] = None,\n entry_point: Optional[Union[List[str], str]] = None,\n volumes: Optional[Dict[str, Any]] = None,\n working_dir: Optional[str] = None,\n gpu_count: Optional[int] = None,\n gpu_device_ids: Optional[List[str]] = None,\n gpu_capabilities: Optional[List[List[str]]] = None,\n) -> ContainerRun:\n \"\"\"Run a container in local.\n\n Args:\n image_uri (str): A docker image uri.\n container_name (str, optional): Name of the container.\n port (int, optional): The port to expose.\n environment_variables (Dict[str, str], optional): Environment variables to set\n in the container.\n command (Union[List[str], str], optional): Command to run the container.\n entry_point (Union[List[str], str], optional): Entry point to run the container.\n volumes (Dict[str, Any], optional): Volumes to mount in the container.\n working_dir (str, optional): Working directory in the container.\n gpu_count (int, optional): Number of GPU devices to request. Set to -1 to\n request all available devices.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_device_ids (List[str], optional): List of strings for GPU device IDs,\n corresponding to `NVIDIA_VISIBLE_DEVICES` in the NVIDIA Runtime.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_capabilities (List[List[str]], optional): This parameter corresponds to\n `NVIDIA_DRIVER_CAPABILITIES` in the NVIDIA Runtime. The default value is\n ``[[\"compute\", \"utility\"]]`` if ``gpu_device_ids`` or ``gpu_count`` is set.\n Available capabilities for the NVIDIA driver can be found in\n https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities.\n\n Returns:\n ContainerRun: A ContainerRun object.\n\n \"\"\"\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n\n client = docker.from_env()\n # use a random host port.\n host_port = randint(49152, 65535)\n\n if gpu_count or gpu_device_ids or gpu_capabilities:\n if not gpu_capabilities:\n gpu_capabilities = [[\"compute\", \"utility\"]]\n device_requests = [\n docker.types.DeviceRequest(\n count=gpu_count,\n device_ids=gpu_device_ids,\n capabilities=gpu_capabilities,\n )\n ]\n else:\n device_requests = []\n\n container = client.containers.run(\n name=container_name,\n entrypoint=entry_point,\n image=image_uri,\n command=command,\n environment=environment_variables,\n ports={port: host_port} if port else None,\n volumes=volumes,\n working_dir=working_dir,\n detach=True,\n device_requests=device_requests,\n )\n container_run = ContainerRun(\n container=container,\n port=host_port,\n )\n return container_run" }, { "identifier": "OssUriObj", "path": "pai/common/oss_utils.py", "snippet": "class OssUriObj(object):\n \"\"\"A class that represents an OSS URI and provides some convenient methods.\"\"\"\n\n def __init__(self, uri: str):\n \"\"\"Constructor for class OssUriObj.\n\n Args:\n uri (str): A string in OSS URI schema: oss://<bucket_name>[.endpoint]/<path/to/file>,\n endpoint in uri is optional.\n \"\"\"\n if not uri.startswith(\"oss://\"):\n raise ValueError(\n \"Invalid OSS URI schema, please provide a string starts with 'oss://'\"\n )\n bucket_name, object_key, 
endpoint, role_arn = self.parse(uri)\n self.bucket_name = bucket_name\n self.object_key = object_key\n self.endpoint = endpoint\n self.role_arn = role_arn\n\n @classmethod\n def from_bucket_key_endpoint(\n cls, bucket_name: str, object_key: str, endpoint: Optional[str] = None\n ) -> \"OssUriObj\":\n \"\"\"Initialize an OSSUri object from bucket_name, object_key and endpoint.\n\n Args:\n bucket_name (str): The name of the OSS bucket.\n object_key (str): OSS object key/path.\n endpoint (str, optional): Endpoint for the OSS bucket.\n\n Returns:\n OssUriObj: An OssUriObj instance represents the specified OSS object.\n\n \"\"\"\n # OSS object key could not contain leading slashes.\n # Document: https://help.aliyun.com/document_detail/273129.html\n if object_key.startswith(\"/\"):\n logger.warning(\n \"OSS object key should not contain leading slashes, the leading\"\n \" slashes will be removed.\"\n )\n object_key = object_key.lstrip(\"/\")\n\n if endpoint:\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n\n uri = f\"oss://{bucket_name}.{endpoint}/{object_key}\"\n else:\n uri = f\"oss://{bucket_name}/{object_key}\"\n return OssUriObj(uri=uri)\n\n @classmethod\n def parse(cls, oss_uri: str) -> Tuple[str, str, str, str]:\n \"\"\"Parse OSS uri string and returns a tuple of (bucket_name, object_key,\n endpoint, role_arn).\n\n Args:\n oss_uri (str): A string in OSS Uri schema: oss://{bucket_name}.{endpoint}/{object_key}.\n\n Returns:\n Tuple: An tuple of [bucket_name, object_key, endpoint, role_arn].\n\n \"\"\"\n parsed_result = urlparse(oss_uri)\n if parsed_result.scheme != \"oss\":\n raise ValueError(\n \"require OSS uri('oss://[bucket_name]/[object_key]') but \"\n \"given '{}'\".format(oss_uri)\n )\n object_key = parsed_result.path\n if object_key.startswith(\"/\"):\n object_key = object_key[1:]\n\n query = parse_qs(parsed_result.query)\n if \".\" in parsed_result.hostname:\n bucket_name, endpoint = parsed_result.hostname.split(\".\", 1)\n else:\n bucket_name = parsed_result.hostname\n # try to get OSS endpoint from url query.\n if \"endpoint\" in query:\n endpoint = query.get(\"endpoint\")[0]\n elif \"host\" in query:\n endpoint = query.get(\"host\")[0]\n else:\n endpoint = None\n role_arn = query.get(\"role_arn\")[0] if \"role_arn\" in query else None\n\n return bucket_name, object_key, endpoint, role_arn\n\n def get_uri_with_endpoint(self, endpoint: str = None) -> str:\n \"\"\"Get an OSS uri string contains endpoint.\n\n Args:\n endpoint (str): Endpoint of the OSS bucket.\n\n Returns:\n str: An string in OSS uri schema contains endpoint.\n\n \"\"\"\n if not endpoint and not self.endpoint:\n raise ValueError(\"Unknown endpoint for the OSS bucket.\")\n\n return \"oss://{bucket_name}.{endpoint}/{object_key}\".format(\n bucket_name=self.bucket_name,\n endpoint=endpoint or self.endpoint,\n object_key=self.object_key,\n )\n\n def get_dir_uri(self):\n \"\"\"Returns directory in OSS uri string format of the original object.\"\"\"\n _, dirname, _ = self.parse_object_key()\n dir_uri = f\"oss://{self.bucket_name}{dirname}\"\n return dir_uri\n\n @property\n def uri(self) -> str:\n \"\"\"Returns OSS uri in string format.\"\"\"\n return \"oss://{bucket_name}/{object_key}\".format(\n bucket_name=self.bucket_name,\n object_key=self.object_key,\n )\n\n def parse_object_key(self) -> Tuple[bool, str, str]:\n \"\"\"Parse the OSS URI object key, returns a tuple of (is_dir, dir_path, file_name).\n\n 
Returns:\n namedtuple: An tuple of is_dir, dir_path, file_name.\n \"\"\"\n object_key = self.object_key.strip()\n if object_key.endswith(\"/\"):\n is_dir, dir_path, file_name = True, os.path.join(\"/\", object_key), None\n else:\n idx = object_key.rfind(\"/\")\n if idx < 0:\n is_dir, dir_path, file_name = False, \"/\", object_key\n else:\n is_dir, dir_path, file_name = (\n False,\n os.path.join(\"/\", object_key[: idx + 1]),\n object_key[idx + 1 :],\n )\n return is_dir, dir_path, file_name" }, { "identifier": "download", "path": "pai/common/oss_utils.py", "snippet": "def download(\n oss_path: Union[str, OssUriObj],\n local_path: str,\n bucket: Optional[oss2.Bucket] = None,\n un_tar=False,\n):\n \"\"\"Download OSS objects to local path.\n\n Args:\n oss_path (str): Source OSS path, could be a single OSS object or a OSS\n directory.\n local_path (str): Local path used to store the data from OSS.\n bucket (oss2.Bucket, optional): OSS bucket used to store the upload data. If it\n is not provided, OSS bucket of the default session will be used.\n un_tar (bool, optional): Whether to decompress the downloaded data. It is only\n work for `oss_path` point to a single file that has a suffix \"tar.gz\".\n\n Returns:\n str: A local file path for the downloaded data.\n\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n if not bucket.object_exists(oss_path) or oss_path.endswith(\"/\"):\n # The `oss_path` represents a \"directory\" in the OSS bucket, download the\n # objects which object key is prefixed with `oss_path`.\n # Note: `un_tar` is not work while `oss_path` is a directory.\n\n oss_path += \"/\" if not oss_path.endswith(\"/\") else \"\"\n iterator = oss2.ObjectIteratorV2(\n bucket=bucket,\n prefix=oss_path,\n )\n keys = [obj.key for obj in iterator if not obj.key.endswith(\"/\")]\n for key in tqdm(keys, desc=f\"Downloading: {oss_path}\"):\n rel_path = os.path.relpath(key, oss_path)\n dest = os.path.join(local_path, rel_path)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n _download_with_progress(\n dest,\n object_key=key,\n oss_bucket=bucket,\n )\n return local_path\n else:\n # The `oss_path` represents a single file in OSS bucket.\n if oss_path.endswith(\".tar.gz\") and un_tar:\n # currently, only tar.gz format is supported for un_tar after downloading.\n with tempfile.TemporaryDirectory() as temp_dir:\n target_path = os.path.join(temp_dir, os.path.basename(oss_path))\n _download_with_progress(\n target_path,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n with tarfile.open(name=target_path, mode=\"r\") as t:\n t.extractall(path=local_path)\n\n return local_path\n else:\n os.makedirs(local_path, exist_ok=True)\n dest = os.path.join(local_path, os.path.basename(oss_path))\n _download_with_progress(\n dest,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n\n return dest" }, { "identifier": "is_oss_uri", "path": "pai/common/oss_utils.py", "snippet": "def is_oss_uri(uri: Union[str, bytes]) -> bool:\n \"\"\"Determines whether the given uri is an OSS uri.\n\n Args:\n uri (Union[str, bytes]): A string in OSS URI schema:\n oss://<bucket_name>[.endpoint]/<path/to/file>,\n\n\n Returns:\n bool: True if the given uri is an OSS uri, else False.\n\n \"\"\"\n return bool(uri and isinstance(uri, (str, bytes)) and str(uri).startswith(\"oss://\"))" }, { "identifier": "upload", "path": "pai/common/oss_utils.py", "snippet": "def upload(\n source_path: str,\n oss_path: Union[str, OssUriObj],\n bucket: Optional[oss2.Bucket] = None,\n is_tar: Optional[bool] = False,\n) -> str:\n 
\"\"\"Upload local source file/directory to OSS.\n\n Examples::\n\n # compress and upload local directory `./src/` to OSS\n >>> upload(source_path=\"./src/\", oss_path=\"path/to/file\",\n ... bucket=session.oss_bucket, is_tar=True)\n\n\n Args:\n source_path (str): Source file local path which needs to be uploaded, can be\n a single file or a directory.\n oss_path (Union[str, OssUriObj]): Destination OSS path.\n bucket (oss2.Bucket): OSS bucket used to store the upload data. If it is not\n provided, OSS bucket of the default session will be used.\n is_tar (bool): Whether to compress the file before uploading (default: False).\n\n Returns:\n str: A string in OSS URI format. If the source_path is directory, return the\n OSS URI representing the directory for uploaded data, else then\n returns the OSS URI points to the uploaded file.\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n source_path_obj = pathlib.Path(source_path)\n if not source_path_obj.exists():\n raise RuntimeError(\"Source path is not exist: {}\".format(source_path))\n\n if is_tar:\n # compress the local data and upload the compressed source data.\n with tempfile.TemporaryDirectory() as dir_name:\n temp_tar_path = _tar_file(\n source_path, os.path.join(dir_name, \"source.tar.gz\")\n )\n dest_path = (\n os.path.join(oss_path, os.path.basename(temp_tar_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=temp_tar_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n elif not source_path_obj.is_dir():\n # if source path is a file, just invoke bucket.put_object.\n\n # if the oss_path is endswith slash, the file will be uploaded to\n # \"{oss_path}{filename}\", else the file will be uploaded to \"{oss_path}\".\n dest_path = (\n os.path.join(oss_path, os.path.basename(source_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=source_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n else:\n # if the source path is a directory, upload all the file under the directory.\n source_files = glob.glob(\n pathname=str(source_path_obj / \"**\"),\n recursive=True,\n )\n if not oss_path.endswith(\"/\"):\n oss_path += \"/\"\n\n files = [f for f in source_files if not os.path.isdir(f)]\n for file_path in files:\n file_path_obj = pathlib.Path(file_path)\n file_relative_path = file_path_obj.relative_to(source_path_obj).as_posix()\n object_key = oss_path + file_relative_path\n _upload_with_progress(\n filename=file_path, object_key=object_key, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, oss_path)" }, { "identifier": "generate_repr", "path": "pai/common/utils.py", "snippet": "def generate_repr(repr_obj, *attr_names: str, **kwargs) -> str:\n \"\"\"Generate a string representation of the given object.\n\n Args:\n repr_obj: The object used to generate the string representation.\n attr_names: A list of attribute names to include in the string representation.\n\n Returns:\n str: A string representation of the object.\n\n \"\"\"\n attrs = {name: getattr(repr_obj, name) for name in attr_names}\n attrs.update(kwargs)\n attr_repr = \", \".join([\"{}={}\".format(k, v) for k, v in attrs.items()])\n cls_name = repr_obj.__class__.__name__\n\n return f\"{cls_name}({attr_repr})\"" }, { "identifier": "is_local_run_instance_type", "path": "pai/common/utils.py", "snippet": "def 
is_local_run_instance_type(instance_type: str) -> bool:\n \"\"\"Return True if instance_type is local run instance type.\"\"\"\n return instance_type and instance_type.strip() in [\n INSTANCE_TYPE_LOCAL_GPU,\n INSTANCE_TYPE_LOCAL,\n ]" }, { "identifier": "random_str", "path": "pai/common/utils.py", "snippet": "def random_str(n):\n \"\"\"Random string generation with lower case letters and digits.\n\n Args:\n n: Size of generated random string.\n\n Returns:\n str: generated random string.\n\n \"\"\"\n return \"\".join(\n random.choice(string.ascii_lowercase + string.digits) for _ in range(n)\n )" }, { "identifier": "to_plain_text", "path": "pai/common/utils.py", "snippet": "def to_plain_text(\n input_str: str, allowed_characters=DEFAULT_PLAIN_TEXT_ALLOW_CHARACTERS, repl_ch=\"_\"\n):\n \"\"\"Replace characters in input_str if it is not in allowed_characters.\"\"\"\n return \"\".join([c if c in allowed_characters else repl_ch for c in input_str])" }, { "identifier": "DuplicatedMountException", "path": "pai/exception.py", "snippet": "class DuplicatedMountException(PAIException):\n \"\"\"Raised if a OSS path is mounted twice.\"\"\"" }, { "identifier": "MountPathIsOccupiedException", "path": "pai/exception.py", "snippet": "class MountPathIsOccupiedException(PAIException):\n \"\"\"Raised if target mount path is already used.\"\"\"" }, { "identifier": "ImageInfo", "path": "pai/image.py", "snippet": "class ImageInfo(object):\n \"\"\"This class represents information for an image provided by PAI.\n\n Args:\n image_name (str): The name of the image.\n image_uri (str): The URI of the image.\n framework_name (str): The name of the framework installed in the image.\n framework_version (str, optional): The version of the framework (Default None).\n image_scope (str): The scope of the image, could be 'training', 'inference' or\n 'develop'.\n accelerator_type (str, optional): The type of accelerator. Defaults to None.\n python_version (str, optional): The version of Python. 
Defaults to None.\n \"\"\"\n\n def __repr__(self):\n return (\n \"{}(framework_name={}: framework_version={}: image_scope={}: \"\n \"accelerator_type={}: py_version={})\".format(\n self.__class__.__name__,\n self.framework_name,\n self.framework_version,\n self.image_scope,\n self.accelerator_type,\n self.python_version,\n )\n )\n\n def __init__(\n self,\n image_name: str,\n image_uri: str,\n framework_name: str,\n image_scope: str,\n framework_version: str = None,\n accelerator_type: Optional[str] = None,\n python_version: Optional[str] = None,\n ):\n self.image_name = image_name\n self.image_uri = image_uri\n self.framework_name = framework_name\n self.framework_version = framework_version\n self.accelerator_type = accelerator_type\n self.python_version = python_version\n self.image_scope = image_scope" }, { "identifier": "AsyncPredictor", "path": "pai/predictor.py", "snippet": "class AsyncPredictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"A class that facilitates making predictions to asynchronous prediction service.\n\n Examples::\n\n # Initialize an AsyncPredictor object using the name of a running service.\n async_predictor = AsyncPredictor(service_name=\"example_service\")\n\n # Make a prediction with the service and get the prediction result.\n resp = async_predictor.predict(data=\"YourPredictionData\")\n result = resp.wait()\n\n # Make a prediction with async API.\n import asyncio\n result = asyncio.run(async_predictor.predict_async(data=\"YourPredictionData\"))\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n max_workers: Optional[int] = None,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `AsyncPredictor` object using an existing async prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n max_workers (int): The maximum number of threads that can be used to\n execute the given prediction calls.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. 
The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n\n super(AsyncPredictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._max_workers = max_workers\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n self._check()\n\n @property\n def max_workers(self):\n return self._max_workers\n\n @max_workers.setter\n def max_workers(self, n: int):\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all submitted tasks in the queue to complete...\")\n self.executor.shutdown()\n self._max_workers = n\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n\n def __del__(self):\n \"\"\"wait for all pending tasks to complete before exit.\"\"\"\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all pending tasks to complete...\")\n self.executor.shutdown()\n super(AsyncPredictor, self).__del__()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") != ServiceType.Async:\n logger.warning(\n \"AsyncPredictor is not recommended to make prediction to a standard \"\n \" prediction service.\"\n )\n\n def _get_result(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = self._send_request(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n logger.debug(\n \"Poll prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n resp.status_code,\n resp.content,\n )\n if resp.status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n\n # Raise exception if status code is not 2xx.\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n resp.status_code, resp.content.decode(\"utf-8\")\n )\n )\n return self._parse_encapsulated_response(resp.json()[0])\n\n def _parse_encapsulated_response(self, data) -> Tuple[int, Dict[str, str], bytes]:\n tags = data[\"tags\"]\n # If the status code from prediction service is not 200, a tag with\n # key 'lastCode' will be added to the tags in response.\n status_code = int(tags.get(\"lastCode\", 200))\n data = base64.b64decode(data[\"data\"])\n # currently, headers are not supported in async prediction service.\n headers = dict()\n return status_code, headers, data\n\n async def _get_result_async(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = await self._send_request_async(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n status_code = resp.status\n content = await resp.read()\n logger.debug(\n \"Get prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n status_code,\n 
content,\n )\n if status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n if status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n status_code, content.decode(\"utf-8\")\n )\n )\n data = (await resp.json())[0]\n return self._parse_encapsulated_response(data)\n\n def _poll_result(\n self, request_id: str, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = self._get_result(request_id=request_id)\n if not result:\n time.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n code=status_code,\n message=f\"Prediction failed: status_code={status_code}\"\n f\" content={content.decode()}\",\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n async def _poll_result_async(\n self, request_id, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = await self._get_result_async(request_id)\n if not result:\n await asyncio.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n f\"Prediction failed: status_code={status_code} content={content.decode()}\"\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n def _get_request_id(self, resp: requests.models.Response) -> str:\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n f\"Send prediction request failed. status_code={resp.status_code} \"\n f\"message={resp.text}\"\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. Missing request id.\"\n f\" status_code={resp.status_code} content={resp.text}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status_code}\",\n )\n return request_id\n\n async def _get_request_id_async(self, resp: aiohttp.ClientResponse) -> str:\n content = await resp.read()\n if resp.status != 200:\n raise RuntimeError(\n \"Send request to async prediction service failed: status_code={} \"\n \"content={}\".format(resp.status, content.decode(\"utf-8\"))\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. 
Missing request id.\"\n f\" status_code={resp.status} content={content.decode()}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status}\",\n )\n return request_id\n\n def _predict_fn(\n self,\n data,\n ):\n \"\"\"Make a prediction with the async prediction service.\"\"\"\n # serialize input data\n data = self._handle_input(data)\n resp = self._send_request(data=data)\n request_id = self._get_request_id(resp)\n logger.debug(\"Async prediction RequestId: \", request_id)\n # poll prediction result\n status, headers, content = self._poll_result(\n request_id=request_id, wait_config=WaitConfig()\n )\n\n return self._handle_output(content)\n\n def _wrap_callback_fn(self, cb: Callable):\n \"\"\"Wrap the callback function to handle the prediction result.\"\"\"\n\n @functools.wraps(cb)\n def _(future: Future):\n return cb(future.result())\n\n return _\n\n def predict(\n self,\n data,\n callback: Optional[Union[Callable, List[Callable]]] = None,\n ):\n \"\"\"Make a prediction with the async prediction service.\n\n The input data is serialized using the `serializer.serialize` method before it\n is sent, and the response body is deserialized using the\n `serializer.deserialize` method the prediction result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n \"\"\"\n self._post_init_serializer()\n future = self.executor.submit(self._predict_fn, data)\n\n if isinstance(callback, Callable):\n callback = [callback]\n\n if callback:\n for cb in callback:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def predict_async(self, data, wait_config: WaitConfig = WaitConfig()):\n \"\"\"Make a prediction with the async prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. 
It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n\n Returns:\n Prediction result.\n\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = await self._send_request_async(data=data)\n request_id = await self._get_request_id_async(resp)\n\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_output(content)\n\n def _raw_predict_fn(self, data, method, path, headers, **kwargs):\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n path=path,\n json=json_data,\n data=data,\n headers=self._build_headers(headers),\n method=method,\n **kwargs,\n )\n request_id = self._get_request_id(resp)\n status, headers, content = self._poll_result(\n request_id, wait_config=WaitConfig()\n )\n return RawResponse(status, headers, content)\n\n def raw_predict(\n self,\n data: Any = None,\n callback: Optional[Union[Callable, List[Callable], None]] = None,\n method: str = \"POST\",\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n **kwargs,\n ) -> AsyncTask:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n path (str, optional): Path for the request to be sent to. If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n\n Examples:\n\n from pai.predictor import AsyncPredictor, AsyncTask\n\n predictor = AsyncPredictor()\n task: AsyncTask = predictor.raw_predict(data=\"YourPredictionData\")\n print(task.result())\n\n \"\"\"\n\n future = self.executor.submit(\n self._raw_predict_fn, data, method, path, headers, **kwargs\n )\n cbs = [callback] if isinstance(callback, Callable) else callback\n if cbs:\n for cb in cbs:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def raw_predict_async(\n self,\n data,\n wait_config: WaitConfig = WaitConfig(),\n method: str = \"POST\",\n headers: Optional[Dict[str, str]] = None,\n path: Optional[str] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction result.\n\n \"\"\"\n if self.service_status not in ServiceStatus.completed_status():\n self.wait_for_ready()\n json_data, data = self._handle_raw_input(data)\n\n resp = await self._send_request_async(\n data=data,\n method=method,\n json=json_data,\n path=path,\n headers=headers,\n **kwargs,\n )\n request_id = await self._get_request_id_async(resp)\n # Polling the prediction result.\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_raw_output(status_code, headers, content)" }, { "identifier": "LocalPredictor", "path": "pai/predictor.py", "snippet": "class LocalPredictor(PredictorBase):\n \"\"\"Perform prediction to a local service running with docker.\"\"\"\n\n def __init__(\n self,\n port: int,\n container_id: Optional[str] = None,\n serializer: Optional[SerializerBase] = None,\n ):\n \"\"\"LocalPredictor initializer.\n\n Args:\n port (int): The port of the local service.\n container_id (str, optional): The container id of the local service.\n serializer (SerializerBase, optional): A serializer object that transforms.\n \"\"\"\n self.container_id = container_id\n self.port = port\n self.serializer = serializer or JsonSerializer()\n self._container_run = (\n self._build_container_run(container_id, port=port)\n if self.container_id\n else None\n )\n\n @classmethod\n def _build_container_run(cls, container_id, port):\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n client = docker.from_env()\n container = client.containers.get(container_id)\n\n return ContainerRun(container=container, port=port)\n\n def predict(self, data) -> Any:\n \"\"\"Perform prediction with the given data.\n\n Args:\n data: The data to be predicted.\n \"\"\"\n request_data = self.serializer.serialize(data=data)\n response = requests.post(\n url=\"http://127.0.0.1:{port}/\".format(port=self._container_run.port),\n data=request_data,\n )\n\n if response.status_code // 100 != 2:\n raise PredictionException(\n code=response.status_code,\n message=response.content,\n )\n\n return self.serializer.deserialize(response.content)\n\n def _build_headers(\n self, headers: Optional[Dict[str, str]] = None\n ) -> Dict[str, str]:\n headers = headers or dict()\n headers[\"User-Agent\"] = http_user_agent(headers.get(\"User-Agent\"))\n return headers\n\n def _build_url(self, path: Optional[str] = None):\n url = \"http://127.0.0.1:{}\".format(self.port)\n if path:\n if path.startswith(\"/\"):\n path = path[1:]\n url = posixpath.join(url, path)\n return url\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n if isinstance(data, (IOBase, bytes, str)):\n # if data is a file-like object, bytes, or string, it will be sent as\n # request body\n json_data, data = None, data\n else:\n # otherwise, it will be treated as a JSON serializable object and sent as\n # JSON.\n json_data, data = data, None\n header = self._build_headers(headers=headers)\n url = self._build_url(path)\n resp = requests.request(\n url=url,\n json=json_data,\n data=data,\n headers=header,\n method=method,\n timeout=timeout,\n **kwargs,\n )\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return resp\n\n def delete_service(self):\n \"\"\"Delete the docker container that running the service.\"\"\"\n if self._container_run:\n self._container_run.stop()\n\n def wait_for_ready(self):\n self._container_run.wait_for_ready()\n # ensure the server is ready.\n self._wait_local_server_ready()\n time.sleep(5)\n\n def _wait_local_server_ready(\n self,\n interval: int = 5,\n ):\n \"\"\"Wait for the local model server to be ready.\"\"\"\n container_run = self._container_run\n while True:\n try:\n # Check whether the container is still running.\n if not container_run.is_running():\n raise RuntimeError(\n \"Container exited unexpectedly, status: {}\".format(\n container_run.status\n )\n )\n\n # Make a HEAD request to the server, just test for connection.\n requests.head(\n f\"http://127.0.0.1:{container_run.port}/\",\n )\n break\n except requests.ConnectionError:\n # ConnectionError means server is not ready.\n logging.debug(\"Waiting for the container to be ready...\")\n time.sleep(interval)\n continue" }, { "identifier": "Predictor", "path": "pai/predictor.py", "snippet": "class Predictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"Predictor is responsible for making prediction to an online service.\n\n The `predictor.predict` method sends the input data to the online prediction service\n and returns the prediction result. The serializer object of the predictor is\n responsible for data transformation when the `predict` method is invoked. 
The input\n data is serialized using the `serializer.serialize` method before it is sent, and\n the response is deserialized using the `serializer.deserialize` method before the\n prediction result returns.\n\n Examples::\n\n # Initialize a predictor object from an existing service using PyTorch\n # processor.\n torch_predictor = Predictor(service_name=\"example_torch_service\")\n result = torch_predictor.predict(numpy.asarray([[22,33,44], [19,22,33]]))\n assert isinstance(result, numpy.ndarray)\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `Predictor` object using an existing prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n super(Predictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._check()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") == ServiceType.Async:\n logger.warning(\n \"Predictor is not recommended to make prediction to a async\"\n \" prediction service.\"\n )\n\n def predict(self, data):\n \"\"\"Make a prediction with the online prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n\n Returns:\n object: Prediction result.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = self._send_request(\n data,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return self._handle_output(\n resp.content,\n )\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n data=data,\n json=json_data,\n method=method,\n path=path,\n headers=headers,\n timeout=timeout,\n **kwargs,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n return resp" }, { "identifier": "ServiceType", "path": "pai/predictor.py", "snippet": "class ServiceType(object):\n Standard = \"Standard\"\n Async = \"Async\"" }, { "identifier": "SerializerBase", "path": "pai/serializers.py", "snippet": "class SerializerBase(ABC):\n \"\"\"Abstract class for creating a Serializer class for predictor.\"\"\"\n\n @abstractmethod\n def serialize(self, data) -> bytes:\n \"\"\"Serialize the input data to bytes for transmitting.\"\"\"\n\n @abstractmethod\n def deserialize(self, data: bytes):\n \"\"\"Deserialize the data from raw bytes to Python object .\"\"\"\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the online prediction service to complete the serializer instance\n initialization.\n\n The implementation of the `inspect_from_service` method is optional. 
You only\n need to implement it if your serializer requires additional information from\n service metadata or if it needs to send a request to the service in order to\n be initialized.\n\n \"\"\"" }, { "identifier": "Session", "path": "pai/session.py", "snippet": "class Session(ResourceAPIsContainerMixin):\n \"\"\"A class responsible for communicating with PAI services.\"\"\"\n\n def __init__(\n self,\n region_id: str,\n workspace_id: Optional[str] = None,\n credential_config: Optional[CredentialConfig] = None,\n oss_bucket_name: Optional[str] = None,\n oss_endpoint: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"PAI Session Initializer.\n\n Args:\n credential_config (:class:`alibabacloud_credentials.models.Config`, optional):\n The credential config used to access the Alibaba Cloud.\n region_id (str): The ID of the Alibaba Cloud region where the service\n is located.\n workspace_id (str, optional): ID of the workspace used in the default\n session.\n oss_bucket_name (str, optional): The name of the OSS bucket used in the\n session.\n oss_endpoint (str, optional): The endpoint for the OSS bucket.\n \"\"\"\n\n if not region_id:\n raise ValueError(\"Region ID must be provided.\")\n\n self._credential_config = credential_config\n self._region_id = region_id\n self._workspace_id = workspace_id\n self._oss_bucket_name = oss_bucket_name\n self._oss_endpoint = oss_endpoint\n\n header = kwargs.pop(\"header\", None)\n super(Session, self).__init__(header=header)\n\n @property\n def region_id(self) -> str:\n return self._region_id\n\n @property\n def is_inner(self) -> bool:\n return self._region_id in INNER_REGION_IDS\n\n @property\n def oss_bucket_name(self) -> str:\n return self._oss_bucket_name\n\n @property\n def oss_endpoint(self) -> str:\n return self._oss_endpoint\n\n @property\n def credential_config(self) -> CredentialConfig:\n return self._credential_config\n\n @property\n def workspace_name(self):\n if hasattr(self, \"_workspace_name\") and self._workspace_name:\n return self._workspace_name\n\n if not self._workspace_id:\n raise ValueError(\"Workspace id is not set.\")\n workspace_api_obj = self.workspace_api.get(workspace_id=self._workspace_id)\n self._workspace_name = workspace_api_obj[\"WorkspaceName\"]\n return self._workspace_name\n\n @property\n def provider(self) -> str:\n caller_identity = self._acs_sts_client.get_caller_identity().body\n return caller_identity.account_id\n\n @property\n def workspace_id(self) -> str:\n \"\"\"ID of the workspace used by the session.\"\"\"\n return self._workspace_id\n\n @property\n def console_uri(self) -> str:\n \"\"\"The web console URI for PAI service.\"\"\"\n if self.is_inner:\n return \"https://pai-next.alibaba-inc.com\"\n else:\n return \"https://pai.console.aliyun.com/console\"\n\n def _init_oss_config(\n self,\n ):\n \"\"\"Initialize a OssConfig instance.\"\"\"\n if not self._oss_bucket_name:\n # If OSS bucket name is not provided, use the default OSS storage URI\n # that is configured for the workspace.\n default_oss_uri = self.workspace_api.get_default_storage_uri(\n self.workspace_id\n )\n if not default_oss_uri:\n raise RuntimeError(\n \"No default OSS URI is configured for the workspace.\"\n )\n oss_uri_obj = OssUriObj(default_oss_uri)\n self._oss_bucket_name = oss_uri_obj.bucket_name\n\n if not self._oss_endpoint:\n self._oss_endpoint = self._get_default_oss_endpoint()\n\n def _get_oss_auth(self):\n auth = oss2.ProviderAuth(\n credentials_provider=CredentialProviderWrapper(\n config=self._credential_config,\n )\n )\n return auth\n\n 
@property\n def oss_bucket(self):\n \"\"\"A OSS2 bucket instance used by the session.\"\"\"\n if not self._oss_bucket_name or not self._oss_endpoint:\n self._init_oss_config()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=self._oss_endpoint,\n bucket_name=self._oss_bucket_name,\n )\n return oss_bucket\n\n def save_config(self, config_path=None):\n \"\"\"Save the configuration of the session to a local file.\"\"\"\n attrs = {key.lstrip(\"_\"): value for key, value in vars(self).items()}\n config = {\n key: value\n for key, value in attrs.items()\n if key in _DEFAULT_CONFIG_KEYS and value is not None\n }\n\n config_path = config_path or DEFAULT_CONFIG_PATH\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path, \"w\") as f:\n f.write(json.dumps(config, indent=4))\n logger.info(\"Write PAI config succeed: config_path=%s\" % config_path)\n\n def patch_oss_endpoint(self, oss_uri: str):\n oss_uri_obj = OssUriObj(oss_uri)\n if oss_uri_obj.endpoint:\n return oss_uri\n\n # patch endpoint using current OSS bucket endpoint.\n endpoint = self.oss_bucket.endpoint\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n return \"oss://{bucket_name}.{endpoint}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n endpoint=endpoint,\n key=oss_uri_obj.object_key,\n )\n\n def _get_default_oss_endpoint(self) -> str:\n \"\"\"Returns a default OSS endpoint.\"\"\"\n\n # OSS Endpoint document:\n # https://help.aliyun.com/document_detail/31837.html\n internet_endpoint = \"oss-{}.aliyuncs.com\".format(self.region_id)\n internal_endpoint = \"oss-{}-internal.aliyuncs.com\".format(self.region_id)\n\n return (\n internet_endpoint\n if is_domain_connectable(internal_endpoint)\n else internet_endpoint\n )\n\n def get_oss_bucket(self, bucket_name: str, endpoint: str = None) -> oss2.Bucket:\n \"\"\"Get a OSS bucket using the credentials of the session.\n\n Args:\n bucket_name (str): The name of the bucket.\n endpoint (str): Endpoint of the bucket.\n\n Returns:\n :class:`oss2.Bucket`: A OSS bucket instance.\n\n \"\"\"\n endpoint = endpoint or self._oss_endpoint or self._get_default_oss_endpoint()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=endpoint,\n bucket_name=bucket_name,\n )\n return oss_bucket\n\n @classmethod\n def get_storage_path_by_category(\n cls, category: str, dir_name: Optional[str] = None\n ) -> str:\n \"\"\"Get an OSS storage path for the resource.\n\n Args:\n category (str): The category of the resource.\n dir_name (str, optional): The directory name of the resource.\n\n Returns:\n str: A OSS storage path.\n\n \"\"\"\n dir_name = dir_name or datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n storage_path = posixpath.join(\"pai\", category, dir_name).strip()\n\n if not storage_path.endswith(\"/\"):\n storage_path += \"/\"\n return storage_path\n\n def is_supported_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n return bool(machine_spec)\n\n def is_gpu_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for training.\"\"\"\n instance_generator = 
make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n if not machine_spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for training job. \"\n \"Please provide a supported instance type.\"\n )\n return machine_spec[\"AcceleratorType\"] == \"GPU\"\n\n def is_supported_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n return bool(spec)\n\n def is_gpu_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n\n if not spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for deploying. \"\n \"Please provide a supported instance type.\"\n )\n return bool(spec[\"GPU\"])" }, { "identifier": "get_default_session", "path": "pai/session.py", "snippet": "def get_default_session() -> \"Session\":\n \"\"\"Get the default session used by the program.\n\n If the global default session is set, the function will try to initialize\n a session from config file.\n\n Returns:\n :class:`pai.session.Session`: The default session.\n\n \"\"\"\n global _default_session\n if not _default_session:\n config = load_default_config_file()\n if not config:\n return\n _default_session = Session(**config)\n return _default_session" } ]
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
17036
raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. """ config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameter only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is using to describe how the model is serving in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to config a InferneceSpec:: >>> # build an inference_spec that using XGBoost processor. 
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str,
session: Session = None,
21
2023-12-01 01:40:12+00:00
24k
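To make the option-merging idiom in the pai `InferenceSpec` code quoted in the record above easier to follow, here is a minimal, self-contained sketch. It is plain Python rather than the pai SDK itself; the helper names `fold_option` and `deep_merge` are illustrative assumptions, and `deep_merge` only approximates the deep update that the SDK gets from `AttrDict.update`. The reversed-split loop mirrors the pattern used inside `InferenceSpec.add_option`.

from typing import Any, Dict


def fold_option(name: str, value: Any) -> Dict[str, Any]:
    # Turn ("a.b.c", v) into {"a": {"b": {"c": v}}}, as add_option does
    # with `for k in reversed(name.split(".")): src = {k: src}`.
    src: Any = value
    for key in reversed(name.split(".")):
        src = {key: src}
    return src


def deep_merge(dst: Dict[str, Any], src: Dict[str, Any]) -> Dict[str, Any]:
    # Recursively merge src into dst; nested dicts are merged, scalars overwritten.
    # (Illustrative stand-in for the AttrDict-based update in the SDK.)
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            deep_merge(dst[key], value)
        else:
            dst[key] = value
    return dst


if __name__ == "__main__":
    spec: Dict[str, Any] = {"processor": "xgboost"}
    deep_merge(spec, fold_option("metadata.rpc.keepalive", 1000))
    deep_merge(spec, fold_option("metadata.rpc.max_batch_size", 8))
    print(spec)
    # -> {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}}

Running the sketch reproduces the nested `metadata.rpc` structure shown in the `InferenceSpec` docstring example, which is why a dotted option path and a nested dictionary are interchangeable ways of setting the same service parameter.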
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HOME\"], \"Library\", \"Application Support\", \"zerolink\", \"config\"\n )\ndef create_config() -> None:\ndef get_config() -> configparser.ConfigParser:\ndef get_config_path() -> str:\ndef get_config_var(var: str) -> str:\ndef write_config_var(var: str, value: str):\ndef write_api_key(api_key: str):\ndef read_api_key() -> Optional[str]:" }, { "identifier": "APIError", "path": "zerolink/exc.py", "snippet": "class APIError(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n def __str__(self) -> str:\n return self.message" }, { "identifier": "AuthenticationError", "path": "zerolink/exc.py", "snippet": "class AuthenticationError(Exception):\n def __init__(self) -> None:\n pass\n\n def __str__(self) -> str:\n return \"No API key. Please run `zerolink key` or set the ZEROLINK_API_KEY environment variable\"" }, { "identifier": "Client", "path": "zerolink_client/client.py", "snippet": "class Client:\n \"\"\"A class for keeping track of data related to the API\n\n The following are accepted as keyword arguments and will be used to construct httpx Clients internally:\n\n ``base_url``: The base URL for the API, all requests are made to a relative path to this URL\n\n ``cookies``: A dictionary of cookies to be sent with every request\n\n ``headers``: A dictionary of headers to be sent with every request\n\n ``timeout``: The maximum amount of a time a request can take. API functions will raise\n httpx.TimeoutException if this is exceeded.\n\n ``verify_ssl``: Whether or not to verify the SSL certificate of the API server. This should be True in production,\n but can be set to False for testing purposes.\n\n ``follow_redirects``: Whether or not to follow redirects. Default value is False.\n\n ``httpx_args``: A dictionary of additional arguments to be passed to the ``httpx.Client`` and ``httpx.AsyncClient`` constructor.\n\n\n Attributes:\n raise_on_unexpected_status: Whether or not to raise an errors.UnexpectedStatus if the API returns a\n status code that was not documented in the source OpenAPI document. 
Can also be provided as a keyword\n argument to the constructor.\n \"\"\"\n\n raise_on_unexpected_status: bool = field(default=False, kw_only=True)\n _base_url: str\n _cookies: Dict[str, str] = field(factory=dict, kw_only=True)\n _headers: Dict[str, str] = field(factory=dict, kw_only=True)\n _timeout: Optional[httpx.Timeout] = field(default=None, kw_only=True)\n _verify_ssl: Union[str, bool, ssl.SSLContext] = field(default=True, kw_only=True)\n _follow_redirects: bool = field(default=False, kw_only=True)\n _httpx_args: Dict[str, Any] = field(factory=dict, kw_only=True)\n _client: Optional[httpx.Client] = field(default=None, init=False)\n _async_client: Optional[httpx.AsyncClient] = field(default=None, init=False)\n\n def with_headers(self, headers: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional headers\"\"\"\n if self._client is not None:\n self._client.headers.update(headers)\n if self._async_client is not None:\n self._async_client.headers.update(headers)\n return evolve(self, headers={**self._headers, **headers})\n\n def with_cookies(self, cookies: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional cookies\"\"\"\n if self._client is not None:\n self._client.cookies.update(cookies)\n if self._async_client is not None:\n self._async_client.cookies.update(cookies)\n return evolve(self, cookies={**self._cookies, **cookies})\n\n def with_timeout(self, timeout: httpx.Timeout) -> \"Client\":\n \"\"\"Get a new client matching this one with a new timeout (in seconds)\"\"\"\n if self._client is not None:\n self._client.timeout = timeout\n if self._async_client is not None:\n self._async_client.timeout = timeout\n return evolve(self, timeout=timeout)\n\n def set_httpx_client(self, client: httpx.Client) -> \"Client\":\n \"\"\"Manually the underlying httpx.Client\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._client = client\n return self\n\n def get_httpx_client(self) -> httpx.Client:\n \"\"\"Get the underlying httpx.Client, constructing a new one if not previously set\"\"\"\n if self._client is None:\n self._client = httpx.Client(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return self._client\n\n def __enter__(self) -> \"Client\":\n \"\"\"Enter a context manager for self.client—you cannot enter twice (see httpx docs)\"\"\"\n self.get_httpx_client().__enter__()\n return self\n\n def __exit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for internal httpx.Client (see httpx docs)\"\"\"\n self.get_httpx_client().__exit__(*args, **kwargs)\n\n def set_async_httpx_client(self, async_client: httpx.AsyncClient) -> \"Client\":\n \"\"\"Manually the underlying httpx.AsyncClient\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._async_client = async_client\n return self\n\n def get_async_httpx_client(self) -> httpx.AsyncClient:\n \"\"\"Get the underlying httpx.AsyncClient, constructing a new one if not previously set\"\"\"\n if self._async_client is None:\n self._async_client = httpx.AsyncClient(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return 
self._async_client\n\n async def __aenter__(self) -> \"Client\":\n \"\"\"Enter a context manager for underlying httpx.AsyncClient—you cannot enter twice (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aenter__()\n return self\n\n async def __aexit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for underlying httpx.AsyncClient (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aexit__(*args, **kwargs)" }, { "identifier": "finetune", "path": "zerolink_client/api/default/finetune.py", "snippet": "def _get_kwargs(\n *,\n file: Union[File, str],\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:" }, { "identifier": "get_models_models_get", "path": "zerolink_client/api/default/get_models_models_get.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[ModelList]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[ModelList]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:" }, { "identifier": "desc_entity_id", "path": "zerolink_client/api/entity/desc_entity_id.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Entity, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:" }, { "identifier": "desc_entity_ontology", "path": "zerolink_client/api/entity/desc_entity_ontology.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, 
client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Any, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Any, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:" }, { "identifier": "lookup_entity", "path": "zerolink_client/api/entity/lookup_entity.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:" }, { "identifier": "lookup_relation", "path": "zerolink_client/api/entity/lookup_relation.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:" }, { "identifier": "search_entity", "path": "zerolink_client/api/entity/search_entity.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, 
List[\"Match\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:" }, { "identifier": "extract_text", "path": "zerolink_client/api/extract/extract_text.py", "snippet": "def _get_kwargs(\n *,\n body: TextExtract,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:" }, { "identifier": "create_userattribute", "path": "zerolink_client/api/fact/create_userattribute.py", "snippet": "def _get_kwargs(\n *,\n body: CreateAttribute,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[GenericResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:" }, { "identifier": "create_userentity", "path": "zerolink_client/api/fact/create_userentity.py", "snippet": "def _get_kwargs(\n *,\n body: CreateEntity,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: 
CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:" }, { "identifier": "create_userrule", "path": "zerolink_client/api/fact/create_userrule.py", "snippet": "def _get_kwargs(\n *,\n body: CreateRule,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:" }, { "identifier": "create_usertriple", "path": "zerolink_client/api/fact/create_usertriple.py", "snippet": "def _get_kwargs(\n *,\n body: CreateTriple,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:" }, { "identifier": "get_triple", "path": "zerolink_client/api/kg/get_triple.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, 
List[\"Triple\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:" }, { "identifier": "post_question", "path": "zerolink_client/api/question/post_question.py", "snippet": "def _get_kwargs(\n *,\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:" }, { "identifier": "create_session", "path": "zerolink_client/api/session/create_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n *,\n name: Union[Unset, str] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "get_session_entities", "path": "zerolink_client/api/session/get_session_entities.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> 
Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:" }, { "identifier": "get_session_facts", "path": "zerolink_client/api/session/get_session_facts.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:" }, { "identifier": "get_user_session", "path": "zerolink_client/api/session/get_user_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n session_name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "create_user", "path": "zerolink_client/api/user/create_user.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[CreateUser]:\ndef _build_response(*, client: Union[AuthenticatedClient, 
Client], response: httpx.Response) -> Response[CreateUser]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:" }, { "identifier": "ChatSession", "path": "zerolink_client/models/chat_session.py", "snippet": "class ChatSession:\n \"\"\"A user chat session.\n\n Attributes:\n id (int):\n name (str): The name of the chat session\n index (int):\n requests (List['Req']):\n responses (List['Rep']):\n created_on (datetime.datetime):\n \"\"\"\n\n id: int\n name: str\n index: int\n requests: List[\"Req\"]\n responses: List[\"Rep\"]\n created_on: datetime.datetime\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.rep import Rep\n from ..models.req import Req\n\n id = self.id\n\n name = self.name\n\n index = self.index\n\n requests = []\n for requests_item_data in self.requests:\n requests_item = requests_item_data.to_dict()\n requests.append(requests_item)\n\n responses = []\n for responses_item_data in self.responses:\n responses_item = responses_item_data.to_dict()\n responses.append(responses_item)\n\n created_on = self.created_on.isoformat()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"name\": name,\n \"index\": index,\n \"requests\": requests,\n \"responses\": responses,\n \"created_on\": created_on,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.rep import Rep\n from ..models.req import Req\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n name = d.pop(\"name\")\n\n index = d.pop(\"index\")\n\n requests = []\n _requests = d.pop(\"requests\")\n for requests_item_data in _requests:\n requests_item = Req.from_dict(requests_item_data)\n\n requests.append(requests_item)\n\n responses = []\n _responses = d.pop(\"responses\")\n for responses_item_data in _responses:\n responses_item = Rep.from_dict(responses_item_data)\n\n responses.append(responses_item)\n\n created_on = isoparse(d.pop(\"created_on\"))\n\n chat_session = cls(\n id=id,\n name=name,\n index=index,\n requests=requests,\n responses=responses,\n created_on=created_on,\n )\n\n chat_session.additional_properties = d\n return chat_session\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateAttribute", "path": "zerolink_client/models/create_attribute.py", "snippet": "class CreateAttribute:\n \"\"\"\n Attributes:\n subject (str): EID of a builtin entity\n predicate (str): Name of attribute\n attribute (Attribute):\n \"\"\"\n\n subject: str\n predicate: str\n attribute: \"Attribute\"\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.attribute import 
Attribute\n\n subject = self.subject\n\n predicate = self.predicate\n\n attribute = self.attribute.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"subject\": subject,\n \"predicate\": predicate,\n \"attribute\": attribute,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.attribute import Attribute\n\n d = src_dict.copy()\n subject = d.pop(\"subject\")\n\n predicate = d.pop(\"predicate\")\n\n attribute = Attribute.from_dict(d.pop(\"attribute\"))\n\n create_attribute = cls(\n subject=subject,\n predicate=predicate,\n attribute=attribute,\n )\n\n create_attribute.additional_properties = d\n return create_attribute\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateEntity", "path": "zerolink_client/models/create_entity.py", "snippet": "class CreateEntity:\n \"\"\"\n Attributes:\n entity (str): Name of entity\n entity_type (Union[Unset, EntityType]): Entity types are entities that map to base ontological entities in\n Foundation.\n entity_str (Union[Unset, str]): User specified type\n is_class (Union[Unset, bool]): Whether the entity is a class or instance Default: False.\n \"\"\"\n\n entity: str\n entity_type: Union[Unset, EntityType] = UNSET\n entity_str: Union[Unset, str] = UNSET\n is_class: Union[Unset, bool] = False\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n entity = self.entity\n\n entity_type: Union[Unset, str] = UNSET\n if not isinstance(self.entity_type, Unset):\n entity_type = self.entity_type.value\n\n entity_str = self.entity_str\n\n is_class = self.is_class\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"entity\": entity,\n }\n )\n if entity_type is not UNSET:\n field_dict[\"entity_type\"] = entity_type\n if entity_str is not UNSET:\n field_dict[\"entity_str\"] = entity_str\n if is_class is not UNSET:\n field_dict[\"is_class\"] = is_class\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n entity = d.pop(\"entity\")\n\n _entity_type = d.pop(\"entity_type\", UNSET)\n entity_type: Union[Unset, EntityType]\n if isinstance(_entity_type, Unset):\n entity_type = UNSET\n else:\n entity_type = EntityType(_entity_type)\n\n entity_str = d.pop(\"entity_str\", UNSET)\n\n is_class = d.pop(\"is_class\", UNSET)\n\n create_entity = cls(\n entity=entity,\n entity_type=entity_type,\n entity_str=entity_str,\n is_class=is_class,\n )\n\n create_entity.additional_properties = d\n return create_entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n 
return key in self.additional_properties" }, { "identifier": "CreateRule", "path": "zerolink_client/models/create_rule.py", "snippet": "class CreateRule:\n \"\"\"\n Attributes:\n rule (str): Textual representation of the rule to parse\n context (Union[Unset, CreateRuleContext]): Context of entities to use for parsing the rule\n \"\"\"\n\n rule: str\n context: Union[Unset, \"CreateRuleContext\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.create_rule_context import CreateRuleContext\n\n rule = self.rule\n\n context: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"rule\": rule,\n }\n )\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.create_rule_context import CreateRuleContext\n\n d = src_dict.copy()\n rule = d.pop(\"rule\")\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, CreateRuleContext]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = CreateRuleContext.from_dict(_context)\n\n create_rule = cls(\n rule=rule,\n context=context,\n )\n\n create_rule.additional_properties = d\n return create_rule\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateRuleResponse", "path": "zerolink_client/models/create_rule_response.py", "snippet": "class CreateRuleResponse:\n \"\"\"\n Attributes:\n id (str):\n \"\"\"\n\n id: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n create_rule_response = cls(\n id=id,\n )\n\n create_rule_response.additional_properties = d\n return create_rule_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTriple", "path": "zerolink_client/models/create_triple.py", "snippet": "class CreateTriple:\n \"\"\"\n Attributes:\n predicate (str): Name of predicate relation\n user_subject (Union[Unset, str]): EID of a user entity\n subject (Union[Unset, str]): EID of a builtin entity\n user_object (Union[Unset, str]): EID of a user entity\n object_ (Union[Unset, str]): EID of a builtin entity\n \"\"\"\n\n predicate: 
str\n user_subject: Union[Unset, str] = UNSET\n subject: Union[Unset, str] = UNSET\n user_object: Union[Unset, str] = UNSET\n object_: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n predicate = self.predicate\n\n user_subject = self.user_subject\n\n subject = self.subject\n\n user_object = self.user_object\n\n object_ = self.object_\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"predicate\": predicate,\n }\n )\n if user_subject is not UNSET:\n field_dict[\"user_subject\"] = user_subject\n if subject is not UNSET:\n field_dict[\"subject\"] = subject\n if user_object is not UNSET:\n field_dict[\"user_object\"] = user_object\n if object_ is not UNSET:\n field_dict[\"object\"] = object_\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n predicate = d.pop(\"predicate\")\n\n user_subject = d.pop(\"user_subject\", UNSET)\n\n subject = d.pop(\"subject\", UNSET)\n\n user_object = d.pop(\"user_object\", UNSET)\n\n object_ = d.pop(\"object\", UNSET)\n\n create_triple = cls(\n predicate=predicate,\n user_subject=user_subject,\n subject=subject,\n user_object=user_object,\n object_=object_,\n )\n\n create_triple.additional_properties = d\n return create_triple\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTuneJobResponse", "path": "zerolink_client/models/create_tune_job_response.py", "snippet": "class CreateTuneJobResponse:\n \"\"\"\n Attributes:\n id (str):\n status (str):\n \"\"\"\n\n id: str\n status: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n status = self.status\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"status\": status,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n status = d.pop(\"status\")\n\n create_tune_job_response = cls(\n id=id,\n status=status,\n )\n\n create_tune_job_response.additional_properties = d\n return create_tune_job_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Entity", "path": "zerolink_client/models/entity.py", "snippet": "class Entity:\n \"\"\"\n Attributes:\n id (str):\n entity (str):\n description (Union[Unset, str]):\n source (Union[Unset, str]):\n source_url (Union[Unset, str]):\n ontology (Union[Unset, Graph]):\n source_id (Union[Unset, str]):\n \"\"\"\n\n id: str\n 
entity: str\n description: Union[Unset, str] = UNSET\n source: Union[Unset, str] = UNSET\n source_url: Union[Unset, str] = UNSET\n ontology: Union[Unset, \"Graph\"] = UNSET\n source_id: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.graph import Graph\n\n id = self.id\n\n entity = self.entity\n\n description = self.description\n\n source = self.source\n\n source_url = self.source_url\n\n ontology: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.ontology, Unset):\n ontology = self.ontology.to_dict()\n\n source_id = self.source_id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"entity\": entity,\n }\n )\n if description is not UNSET:\n field_dict[\"description\"] = description\n if source is not UNSET:\n field_dict[\"source\"] = source\n if source_url is not UNSET:\n field_dict[\"source_url\"] = source_url\n if ontology is not UNSET:\n field_dict[\"ontology\"] = ontology\n if source_id is not UNSET:\n field_dict[\"source_id\"] = source_id\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.graph import Graph\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n entity = d.pop(\"entity\")\n\n description = d.pop(\"description\", UNSET)\n\n source = d.pop(\"source\", UNSET)\n\n source_url = d.pop(\"source_url\", UNSET)\n\n _ontology = d.pop(\"ontology\", UNSET)\n ontology: Union[Unset, Graph]\n if isinstance(_ontology, Unset):\n ontology = UNSET\n else:\n ontology = Graph.from_dict(_ontology)\n\n source_id = d.pop(\"source_id\", UNSET)\n\n entity = cls(\n id=id,\n entity=entity,\n description=description,\n source=source,\n source_url=source_url,\n ontology=ontology,\n source_id=source_id,\n )\n\n entity.additional_properties = d\n return entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "HTTPValidationError", "path": "zerolink_client/models/http_validation_error.py", "snippet": "class HTTPValidationError:\n \"\"\"\n Attributes:\n detail (Union[Unset, List['ValidationError']]):\n \"\"\"\n\n detail: Union[Unset, List[\"ValidationError\"]] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.validation_error import ValidationError\n\n detail: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.detail, Unset):\n detail = []\n for detail_item_data in self.detail:\n detail_item = detail_item_data.to_dict()\n detail.append(detail_item)\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if detail is not UNSET:\n field_dict[\"detail\"] = detail\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.validation_error import ValidationError\n\n d = src_dict.copy()\n detail = []\n _detail = d.pop(\"detail\", UNSET)\n for detail_item_data in _detail or []:\n detail_item = 
ValidationError.from_dict(detail_item_data)\n\n detail.append(detail_item)\n\n http_validation_error = cls(\n detail=detail,\n )\n\n http_validation_error.additional_properties = d\n return http_validation_error\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Question", "path": "zerolink_client/models/question.py", "snippet": "class Question:\n \"\"\"A question to be answered by querying the knowledge graph and reasoner.\n\n Attributes:\n body (str): The body of the question\n world (Union[Unset, WorldAssumption]): The world assumption is the assumption about the world that the reasoner\n makes. This is used to determine the answer to a query. For example, if\n the world assumption is \"closed\" then the reasoner will assume that the\n answer to the query is \"no\" if it cannot find a triple to satisfy the\n query. Default: WorldAssumption.CLOSED.\n spatial (Union[Unset, SpatialAssumption]): The spatial assumption is the assumption about space that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if the\n spatial assumption is \"earth\" then the reasoner will only consider\n geographic locations on Earth and will assume all instances of 'location'\n are on Earth. If the spatial assumption is \"universe\" then the reasoner\n then this restriction is lifted and the reasoner will consider all\n locations in the universe. Default: SpatialAssumption.EARTH.\n temporal (Union[Unset, TemporalAssumption]): The temporal assumption is the assumption about time that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if\n the temporal assumption is \"current\" then the reasoner will only consider\n triples that refer to entities that are non-historical. Excluding things\n like the Roman Empire and Francoist Spain. Default: TemporalAssumption.CURRENT.\n context (Union[Unset, ContextAssumption]): The context assumption is the assumption about the context that the\n reasoner makes. This is used to determine the answer to a query. For\n example, if the context assumption is \"none\" then the reasoner will only\n consider basic triples like instance_of and subclass_of. If the context\n assumption is \"local\" then the reasoner will consider triples that are\n defined by the user. If the context assumption is \"global\" then the\n reasoner will consider all queryable triples. 
Default: ContextAssumption.GLOBAL.\n \"\"\"\n\n body: str\n world: Union[Unset, WorldAssumption] = WorldAssumption.CLOSED\n spatial: Union[Unset, SpatialAssumption] = SpatialAssumption.EARTH\n temporal: Union[Unset, TemporalAssumption] = TemporalAssumption.CURRENT\n context: Union[Unset, ContextAssumption] = ContextAssumption.GLOBAL\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n body = self.body\n\n world: Union[Unset, str] = UNSET\n if not isinstance(self.world, Unset):\n world = self.world.value\n\n spatial: Union[Unset, str] = UNSET\n if not isinstance(self.spatial, Unset):\n spatial = self.spatial.value\n\n temporal: Union[Unset, str] = UNSET\n if not isinstance(self.temporal, Unset):\n temporal = self.temporal.value\n\n context: Union[Unset, str] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"body\": body,\n }\n )\n if world is not UNSET:\n field_dict[\"world\"] = world\n if spatial is not UNSET:\n field_dict[\"spatial\"] = spatial\n if temporal is not UNSET:\n field_dict[\"temporal\"] = temporal\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n body = d.pop(\"body\")\n\n _world = d.pop(\"world\", UNSET)\n world: Union[Unset, WorldAssumption]\n if isinstance(_world, Unset):\n world = UNSET\n else:\n world = WorldAssumption(_world)\n\n _spatial = d.pop(\"spatial\", UNSET)\n spatial: Union[Unset, SpatialAssumption]\n if isinstance(_spatial, Unset):\n spatial = UNSET\n else:\n spatial = SpatialAssumption(_spatial)\n\n _temporal = d.pop(\"temporal\", UNSET)\n temporal: Union[Unset, TemporalAssumption]\n if isinstance(_temporal, Unset):\n temporal = UNSET\n else:\n temporal = TemporalAssumption(_temporal)\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, ContextAssumption]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = ContextAssumption(_context)\n\n question = cls(\n body=body,\n world=world,\n spatial=spatial,\n temporal=temporal,\n context=context,\n )\n\n question.additional_properties = d\n return question\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "QuestionResponse", "path": "zerolink_client/models/question_response.py", "snippet": "class QuestionResponse:\n \"\"\"A response to a question request.\n\n Attributes:\n id (int): The ID of the question\n msg (str): A message describing the result of the question\n status (ResultStatus): The status of a result.\n answers (List[str]): The answers to the question\n methods (List[str]): The methods used to answer the question\n reasoners (List[str]): The reasoners used to answer the question\n query (Union[Unset, QuestionResponseQuery]): The query used to answer the question\n \"\"\"\n\n id: int\n msg: str\n status: ResultStatus\n answers: List[str]\n methods: List[str]\n reasoners: List[str]\n query: Union[Unset, 
\"QuestionResponseQuery\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.question_response_query import QuestionResponseQuery\n\n id = self.id\n\n msg = self.msg\n\n status = self.status.value\n\n answers = self.answers\n\n methods = self.methods\n\n reasoners = self.reasoners\n\n query: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.query, Unset):\n query = self.query.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"msg\": msg,\n \"status\": status,\n \"answers\": answers,\n \"methods\": methods,\n \"reasoners\": reasoners,\n }\n )\n if query is not UNSET:\n field_dict[\"query\"] = query\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.question_response_query import QuestionResponseQuery\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n msg = d.pop(\"msg\")\n\n status = ResultStatus(d.pop(\"status\"))\n\n answers = cast(List[str], d.pop(\"answers\"))\n\n methods = cast(List[str], d.pop(\"methods\"))\n\n reasoners = cast(List[str], d.pop(\"reasoners\"))\n\n _query = d.pop(\"query\", UNSET)\n query: Union[Unset, QuestionResponseQuery]\n if isinstance(_query, Unset):\n query = UNSET\n else:\n query = QuestionResponseQuery.from_dict(_query)\n\n question_response = cls(\n id=id,\n msg=msg,\n status=status,\n answers=answers,\n methods=methods,\n reasoners=reasoners,\n query=query,\n )\n\n question_response.additional_properties = d\n return question_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "TextExtract", "path": "zerolink_client/models/text_extract.py", "snippet": "class TextExtract:\n \"\"\"\n Attributes:\n text (str): Text to extract from\n extraction_model (Union[Unset, ExtractModel]): An enumeration. 
Default: ExtractModel.BASE.\n \"\"\"\n\n text: str\n extraction_model: Union[Unset, ExtractModel] = ExtractModel.BASE\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n text = self.text\n\n extraction_model: Union[Unset, str] = UNSET\n if not isinstance(self.extraction_model, Unset):\n extraction_model = self.extraction_model.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"text\": text,\n }\n )\n if extraction_model is not UNSET:\n field_dict[\"extraction_model\"] = extraction_model\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n text = d.pop(\"text\")\n\n _extraction_model = d.pop(\"extraction_model\", UNSET)\n extraction_model: Union[Unset, ExtractModel]\n if isinstance(_extraction_model, Unset):\n extraction_model = UNSET\n else:\n extraction_model = ExtractModel(_extraction_model)\n\n text_extract = cls(\n text=text,\n extraction_model=extraction_model,\n )\n\n text_extract.additional_properties = d\n return text_extract\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "File", "path": "zerolink_client/types.py", "snippet": "class File:\n \"\"\"Contains information for file uploads\"\"\"\n\n payload: BinaryIO\n file_name: Optional[str] = None\n mime_type: Optional[str] = None\n\n def to_tuple(self) -> FileJsonType:\n \"\"\"Return a tuple representation that httpx will accept for multipart/form-data\"\"\"\n return self.file_name, self.payload, self.mime_type" }, { "identifier": "UNSET", "path": "zerolink_client/types.py", "snippet": "UNSET: Unset = Unset()" } ]
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
15,942
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError):
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError):
raise APIError(str(rep))
1
2023-12-03 07:50:04+00:00
24k
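As a usage note for the zerolink_client record above: the quoted Question snippet exposes only a constructor, to_dict/from_dict, and an additional_properties catch-all, so a round trip can be sketched from that alone. This is an illustrative sketch, not part of the dataset row; the only assumption beyond the quoted code is that zerolink_client.models exports Question, which the import_statement field of this record already shows.

# Minimal round-trip sketch for the Question model quoted in the record above.
# Only behaviour visible in the snippet is used: Question(body=...), to_dict(),
# from_dict(), and the additional_properties catch-all for unknown keys.
from zerolink_client.models import Question

q = Question(body="Who designed the Eiffel Tower?")

payload = q.to_dict()        # required "body" plus the default assumption enums' .value strings
payload["debug"] = True      # an unknown key, to exercise the additional_properties catch-all

q2 = Question.from_dict(payload)
print(q2.body)               # -> "Who designed the Eiffel Tower?"
print(q2.additional_keys)    # -> ["debug"], preserved in additional_properties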
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss):\n \"\"\"\n DO NOT APPLY NONLINEARITY IN YOUR NETWORK!\n\n target mut be one hot encoded\n IMPORTANT: We assume use_ignore_label is located in target[:, -1]!!!\n\n :param soft_dice_kwargs:\n :param bce_kwargs:\n :param aggregate:\n \"\"\"\n super(DC_and_BCE_loss, self).__init__()\n if use_ignore_label:\n bce_kwargs['reduction'] = 'none'\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.use_ignore_label = use_ignore_label\n\n self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)\n self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n if self.use_ignore_label:\n # target is one hot encoded here. invert it so that it is True wherever we can compute the loss\n mask = (1 - target[:, -1:]).bool()\n # remove ignore channel now that we have the mask\n target_regions = torch.clone(target[:, :-1])\n else:\n target_regions = target\n mask = None\n\n dc_loss = self.dc(net_output, target_regions, loss_mask=mask)\n if mask is not None:\n ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)\n else:\n ce_loss = self.ce(net_output, target_regions)\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "DC_and_CE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_CE_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,\n dice_class=SoftDiceLoss):\n \"\"\"\n Weights for CE and Dice do not need to sum to one. You can set whatever you want.\n :param soft_dice_kwargs:\n :param ce_kwargs:\n :param aggregate:\n :param square_dice:\n :param weight_ce:\n :param weight_dice:\n \"\"\"\n super(DC_and_CE_loss, self).__init__()\n if ignore_label is not None:\n ce_kwargs['ignore_index'] = ignore_label\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.ignore_label = ignore_label\n\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n \"\"\"\n target must be b, c, x, y(, z) with c=1\n :param net_output:\n :param target:\n :return:\n \"\"\"\n if self.ignore_label is not None:\n assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \\\n '(DC_and_CE_loss)'\n mask = (target != self.ignore_label).bool()\n # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we\n # ignore gradients in those areas anyway\n target_dice = torch.clone(target)\n target_dice[target == self.ignore_label] = 0\n num_fg = mask.sum()\n else:\n target_dice = target\n mask = None\n\n dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \\\n if self.weight_dice != 0 else 0\n ce_loss = self.ce(net_output, target[:, 0].long()) \\\n if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0\n\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "get_tp_fp_fn_tn", "path": "nnunetv2/training/loss/dice.py", "snippet": "def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):\n \"\"\"\n net_output must be (b, c, x, y(, z)))\n gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))\n if mask is provided it must have shape (b, 1, x, y(, z)))\n :param net_output:\n :param gt:\n :param axes: can be (, ) = no summation\n :param mask: mask must be 1 for valid pixels and 0 for invalid pixels\n :param square: if True then fp, tp and fn will be squared before summation\n :return:\n \"\"\"\n if axes is None:\n axes = tuple(range(2, len(net_output.size())))\n\n shp_x = net_output.shape\n shp_y = gt.shape\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n gt = gt.view((shp_y[0], 1, *shp_y[1:]))\n\n if net_output.shape == gt.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = gt\n else:\n gt = gt.long()\n y_onehot = torch.zeros(shp_x, device=net_output.device)\n y_onehot.scatter_(1, gt, 1)\n\n tp = net_output * y_onehot\n fp = net_output * (1 - y_onehot)\n fn = (1 - net_output) * y_onehot\n tn = (1 - net_output) * (1 - y_onehot)\n\n if mask is not None:\n with torch.no_grad():\n mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))]))\n tp *= mask_here\n fp *= mask_here\n fn *= mask_here\n tn *= mask_here\n # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes\n # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram\n # (using nnUNetv2_train 998 3d_fullres 0)\n # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)\n # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)\n # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)\n # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)\n\n if square:\n tp = tp ** 2\n fp = fp ** 2\n fn = fn ** 2\n tn = tn ** 2\n\n if len(axes) > 0:\n tp = tp.sum(dim=axes, keepdim=False)\n fp = fp.sum(dim=axes, keepdim=False)\n fn = fn.sum(dim=axes, keepdim=False)\n tn = tn.sum(dim=axes, keepdim=False)\n\n return tp, fp, fn, tn" }, { "identifier": "MemoryEfficientSoftDiceLoss", "path": "nnunetv2/training/loss/dice.py", "snippet": "class MemoryEfficientSoftDiceLoss(nn.Module):\n def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,\n ddp: bool = True):\n \"\"\"\n saves 1.6 GB on Dataset017 3d_lowres\n \"\"\"\n super(MemoryEfficientSoftDiceLoss, self).__init__()\n\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n self.ddp = ddp\n\n def forward(self, x, y, loss_mask=None):\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n # make everything shape (b, c)\n axes = list(range(2, len(x.shape)))\n with torch.no_grad():\n if len(x.shape) != len(y.shape):\n y = y.view((y.shape[0], 1, *y.shape[1:]))\n\n if x.shape == y.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = y\n else:\n gt = y.long()\n y_onehot = torch.zeros(x.shape, device=x.device, dtype=torch.bool)\n y_onehot.scatter_(1, gt, 1)\n\n if not self.do_bg:\n y_onehot = y_onehot[:, 1:]\n\n sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes)\n\n # this one MUST be outside the with torch.no_grad(): context. Otherwise no gradients for you\n if not self.do_bg:\n x = x[:, 1:]\n\n intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes)\n sum_pred = x.sum(axes) if loss_mask is None else (x * loss_mask).sum(axes)\n\n if self.ddp and self.batch_dice:\n intersect = AllGatherGrad.apply(intersect).sum(0)\n sum_pred = AllGatherGrad.apply(sum_pred).sum(0)\n sum_gt = AllGatherGrad.apply(sum_gt).sum(0)\n\n if self.batch_dice:\n intersect = intersect.sum(0)\n sum_pred = sum_pred.sum(0)\n sum_gt = sum_gt.sum(0)\n\n dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8))\n\n dc = dc.mean()\n return -dc" }, { "identifier": "nnUNetTrainer", "path": "nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py", "snippet": "class nnUNetTrainer(object):\n def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,\n device: torch.device = torch.device('cuda')):\n # From https://grugbrain.dev/. 
Worth a read ya big brains ;-)\n\n # apex predator of grug is complexity\n # complexity bad\n # say again:\n # complexity very bad\n # you say now:\n # complexity very, very bad\n # given choice between complexity or one on one against t-rex, grug take t-rex: at least grug see t-rex\n # complexity is spirit demon that enter codebase through well-meaning but ultimately very clubbable non grug-brain developers and project managers who not fear complexity spirit demon or even know about sometime\n # one day code base understandable and grug can get work done, everything good!\n # next day impossible: complexity demon spirit has entered code and very dangerous situation!\n\n # OK OK I am guilty. But I tried.\n # https://www.osnews.com/images/comics/wtfm.jpg\n # https://i.pinimg.com/originals/26/b2/50/26b250a738ea4abc7a5af4d42ad93af0.jpg\n\n self.is_ddp = dist.is_available() and dist.is_initialized()\n self.local_rank = 0 if not self.is_ddp else dist.get_rank()\n\n self.device = device\n\n # print what device we are using\n if self.is_ddp: # implicitly it's clear that we use cuda in this case\n print(f\"I am local rank {self.local_rank}. {device_count()} GPUs are available. The world size is \"\n f\"{dist.get_world_size()}.\"\n f\"Setting device to {self.device}\")\n self.device = torch.device(type='cuda', index=self.local_rank)\n else:\n if self.device.type == 'cuda':\n # we might want to let the user pick this but for now please pick the correct GPU with CUDA_VISIBLE_DEVICES=X\n self.device = torch.device(type='cuda', index=0)\n print(f\"Using device: {self.device}\")\n\n # loading and saving this class for continuing from checkpoint should not happen based on pickling. This\n # would also pickle the network etc. Bad, bad. Instead we just reinstantiate and then load the checkpoint we\n # need. So let's save the init args\n self.my_init_kwargs = {}\n for k in inspect.signature(self.__init__).parameters.keys():\n self.my_init_kwargs[k] = locals()[k]\n\n ### Saving all the init args into class variables for later access\n self.plans_manager = PlansManager(plans)\n self.configuration_manager = self.plans_manager.get_configuration(configuration)\n self.configuration_name = configuration\n self.dataset_json = dataset_json\n self.fold = fold\n self.unpack_dataset = unpack_dataset\n\n ### Setting all the folder names. We need to make sure things don't crash in case we are just running\n # inference and some of the folders may not be defined!\n self.preprocessed_dataset_folder_base = join(nnUNet_preprocessed, self.plans_manager.dataset_name) \\\n if nnUNet_preprocessed is not None else None\n self.output_folder_base = join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" + configuration) \\\n if nnUNet_results is not None else None\n self.output_folder = join(self.output_folder_base, f'fold_{fold}')\n\n self.preprocessed_dataset_folder = join(self.preprocessed_dataset_folder_base,\n self.configuration_manager.data_identifier)\n # unlike the previous nnunet folder_with_segs_from_previous_stage is now part of the plans. For now it has to\n # be a different configuration in the same plans\n # IMPORTANT! the mapping must be bijective, so lowres must point to fullres and vice versa (using\n # \"previous_stage\" and \"next_stage\"). 
Otherwise it won't work!\n self.is_cascaded = self.configuration_manager.previous_stage_name is not None\n self.folder_with_segs_from_previous_stage = \\\n join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" +\n self.configuration_manager.previous_stage_name, 'predicted_next_stage', self.configuration_name) \\\n if self.is_cascaded else None\n\n ### Some hyperparameters for you to fiddle with\n self.initial_lr = 1e-2\n self.weight_decay = 3e-5\n self.oversample_foreground_percent = 0.33\n self.num_iterations_per_epoch = 250\n self.num_val_iterations_per_epoch = 50\n self.num_epochs = 1000\n self.current_epoch = 0\n\n ### Dealing with labels/regions\n self.label_manager = self.plans_manager.get_label_manager(dataset_json)\n # labels can either be a list of int (regular training) or a list of tuples of int (region-based training)\n # needed for predictions. We do sigmoid in case of (overlapping) regions\n\n self.num_input_channels = None # -> self.initialize()\n self.network = None # -> self._get_network()\n self.optimizer = self.lr_scheduler = None # -> self.initialize\n self.grad_scaler = GradScaler() if self.device.type == 'cuda' else None\n self.loss = None # -> self.initialize\n\n ### Simple logging. Don't take that away from me!\n # initialize log file. This is just our log for the print statements etc. Not to be confused with lightning\n # logging\n timestamp = datetime.now()\n maybe_mkdir_p(self.output_folder)\n self.log_file = join(self.output_folder, \"training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt\" %\n (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))\n self.logger = nnUNetLogger()\n\n ### placeholders\n self.dataloader_train = self.dataloader_val = None # see on_train_start\n\n ### initializing stuff for remembering things and such\n self._best_ema = None\n\n ### inference things\n self.inference_allowed_mirroring_axes = None # this variable is set in\n # self.configure_rotation_dummyDA_mirroring_and_inital_patch_size and will be saved in checkpoints\n\n ### checkpoint saving stuff\n self.save_every = 50\n self.disable_checkpointing = False\n\n ## DDP batch size and oversampling can differ between workers and needs adaptation\n # we need to change the batch size in DDP because we don't use any of those distributed samplers\n self._set_batch_size_and_oversample()\n\n self.was_initialized = False\n\n self.print_to_log_file(\"\\n#######################################################################\\n\"\n \"Please cite the following paper when using nnU-Net:\\n\"\n \"Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). \"\n \"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. 
\"\n \"Nature methods, 18(2), 203-211.\\n\"\n \"#######################################################################\\n\",\n also_print_to_console=True, add_timestamp=False)\n\n def initialize(self):\n if not self.was_initialized:\n self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager,\n self.dataset_json)\n\n self.network = self.build_network_architecture(self.plans_manager, self.dataset_json,\n self.configuration_manager,\n self.num_input_channels,\n enable_deep_supervision=True).to(self.device)\n # compile network for free speedup\n if self._do_i_compile():\n self.print_to_log_file('Compiling network...')\n self.network = torch.compile(self.network)\n\n self.optimizer, self.lr_scheduler = self.configure_optimizers()\n # if ddp, wrap in DDP wrapper\n if self.is_ddp:\n self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network)\n self.network = DDP(self.network, device_ids=[self.local_rank])\n\n self.loss = self._build_loss()\n self.was_initialized = True\n else:\n raise RuntimeError(\"You have called self.initialize even though the trainer was already initialized. \"\n \"That should not happen.\")\n\n def _do_i_compile(self):\n return ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't'))\n\n def _save_debug_information(self):\n # saving some debug information\n if self.local_rank == 0:\n dct = {}\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)) or k in ['loss', ]:\n dct[k] = str(getattr(self, k))\n elif k in ['network', ]:\n dct[k] = str(getattr(self, k).__class__.__name__)\n else:\n # print(k)\n pass\n if k in ['dataloader_train', 'dataloader_val']:\n if hasattr(getattr(self, k), 'generator'):\n dct[k + '.generator'] = str(getattr(self, k).generator)\n if hasattr(getattr(self, k), 'num_processes'):\n dct[k + '.num_processes'] = str(getattr(self, k).num_processes)\n if hasattr(getattr(self, k), 'transform'):\n dct[k + '.transform'] = str(getattr(self, k).transform)\n import subprocess\n hostname = subprocess.getoutput(['hostname'])\n dct['hostname'] = hostname\n torch_version = torch.__version__\n if self.device.type == 'cuda':\n gpu_name = torch.cuda.get_device_name()\n dct['gpu_name'] = gpu_name\n cudnn_version = torch.backends.cudnn.version()\n else:\n cudnn_version = 'None'\n dct['device'] = str(self.device)\n dct['torch_version'] = torch_version\n dct['cudnn_version'] = cudnn_version\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n @staticmethod\n def build_network_architecture(plans_manager: PlansManager,\n dataset_json,\n configuration_manager: ConfigurationManager,\n num_input_channels,\n enable_deep_supervision: bool = True) -> nn.Module:\n \"\"\"\n his is where you build the architecture according to the plans. There is no obligation to use\n get_network_from_plans, this is just a utility we use for the nnU-Net default architectures. You can do what\n you want. Even ignore the plans and just return something static (as long as it can process the requested\n patch size)\n but don't bug us with your bugs arising from fiddling with this :-P\n This is the function that is called in inference as well! 
This is needed so that all network architecture\n variants can be loaded at inference time (inference will use the same nnUNetTrainer that was used for\n training, so if you change the network architecture during training by deriving a new trainer class then\n inference will know about it).\n\n If you need to know how many segmentation outputs your custom architecture needs to have, use the following snippet:\n > label_manager = plans_manager.get_label_manager(dataset_json)\n > label_manager.num_segmentation_heads\n (why so complicated? -> We can have either classical training (classes) or regions. If we have regions,\n the number of outputs is != the number of classes. Also there is the ignore label for which no output\n should be generated. label_manager takes care of all that for you.)\n\n \"\"\"\n return get_network_from_plans(plans_manager, dataset_json, configuration_manager,\n num_input_channels, deep_supervision=enable_deep_supervision)\n\n def _get_deep_supervision_scales(self):\n deep_supervision_scales = list(list(i) for i in 1 / np.cumprod(np.vstack(\n self.configuration_manager.pool_op_kernel_sizes), axis=0))[:-1]\n return deep_supervision_scales\n\n def _set_batch_size_and_oversample(self):\n if not self.is_ddp:\n # set batch size to what the plan says, leave oversample untouched\n self.batch_size = self.configuration_manager.batch_size\n else:\n # batch size is distributed over DDP workers and we need to change oversample_percent for each worker\n batch_sizes = []\n oversample_percents = []\n\n world_size = dist.get_world_size()\n my_rank = dist.get_rank()\n\n global_batch_size = self.configuration_manager.batch_size\n assert global_batch_size >= world_size, 'Cannot run DDP if the batch size is smaller than the number of ' \\\n 'GPUs... 
Duh.'\n\n batch_size_per_GPU = np.ceil(global_batch_size / world_size).astype(int)\n\n for rank in range(world_size):\n if (rank + 1) * batch_size_per_GPU > global_batch_size:\n batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - global_batch_size)\n else:\n batch_size = batch_size_per_GPU\n\n batch_sizes.append(batch_size)\n\n sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1])\n sample_id_high = np.sum(batch_sizes)\n\n if sample_id_high / global_batch_size < (1 - self.oversample_foreground_percent):\n oversample_percents.append(0.0)\n elif sample_id_low / global_batch_size > (1 - self.oversample_foreground_percent):\n oversample_percents.append(1.0)\n else:\n percent_covered_by_this_rank = sample_id_high / global_batch_size - sample_id_low / global_batch_size\n oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) -\n sample_id_low / global_batch_size) / percent_covered_by_this_rank)\n oversample_percents.append(oversample_percent_here)\n\n print(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n print(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n\n self.batch_size = batch_sizes[my_rank]\n self.oversample_foreground_percent = oversample_percents[my_rank]\n\n def _build_loss(self):\n if self.label_manager.has_regions:\n loss = DC_and_BCE_loss({},\n {'batch_dice': self.configuration_manager.batch_dice,\n 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp},\n use_ignore_label=self.label_manager.ignore_label is not None,\n dice_class=MemoryEfficientSoftDiceLoss)\n else:\n loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])\n weights[-1] = 0\n\n # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1\n weights = weights / weights.sum()\n # now wrap the loss\n loss = DeepSupervisionWrapper(loss, weights)\n return loss\n\n def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):\n \"\"\"\n This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it.\n \"\"\"\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation)\n if dim == 2:\n do_dummy_2d_data_aug = False\n # todo revisit this parametrization\n if max(patch_size) / min(patch_size) > 1.5:\n rotation_for_DA = {\n 'x': (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n mirror_axes = (0, 1)\n elif dim == 3:\n # todo this is not ideal. 
We could also have patch_size (64, 16, 128) in which case a full 180deg 2d rot would be bad\n # order of the axes is determined by spacing, not image size\n do_dummy_2d_data_aug = (max(patch_size) / patch_size[0]) > ANISO_THRESHOLD\n if do_dummy_2d_data_aug:\n # why do we rotate 180 deg here all the time? We should also restrict it\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'y': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'z': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n }\n mirror_axes = (0, 1, 2)\n else:\n raise RuntimeError()\n\n # todo this function is stupid. It doesn't even use the correct scale range (we keep things as they were in the\n # old nnunet for now)\n initial_patch_size = get_patch_size(patch_size[-dim:],\n *rotation_for_DA.values(),\n (0.85, 1.25))\n if do_dummy_2d_data_aug:\n initial_patch_size[0] = patch_size[0]\n\n self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}')\n self.inference_allowed_mirroring_axes = mirror_axes\n\n return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes\n\n def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):\n if self.local_rank == 0:\n timestamp = time()\n dt_object = datetime.fromtimestamp(timestamp)\n\n if add_timestamp:\n args = (f\"{dt_object}:\", *args)\n\n successful = False\n max_attempts = 5\n ctr = 0\n while not successful and ctr < max_attempts:\n try:\n with open(self.log_file, 'a+') as f:\n for a in args:\n f.write(str(a))\n f.write(\" \")\n f.write(\"\\n\")\n successful = True\n except IOError:\n print(f\"{datetime.fromtimestamp(timestamp)}: failed to log: \", sys.exc_info())\n sleep(0.5)\n ctr += 1\n if also_print_to_console:\n print(*args)\n elif also_print_to_console:\n print(*args)\n\n def print_plans(self):\n if self.local_rank == 0:\n dct = deepcopy(self.plans_manager.plans)\n del dct['configurations']\n self.print_to_log_file(f\"\\nThis is the configuration used by this \"\n f\"training:\\nConfiguration name: {self.configuration_name}\\n\",\n self.configuration_manager, '\\n', add_timestamp=False)\n self.print_to_log_file('These are the global plan.json settings:\\n', dct, '\\n', add_timestamp=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)\n return optimizer, lr_scheduler\n\n def plot_network_architecture(self):\n if self._do_i_compile():\n self.print_to_log_file(\"Unable to plot network architecture: nnUNet_compile is enabled!\")\n return\n\n if self.local_rank == 0:\n try:\n # raise NotImplementedError('hiddenlayer no longer works and we do not have a viable alternative :-(')\n # pip install git+https://github.com/saugatkandel/hiddenlayer.git\n\n # from torchviz import make_dot\n # # not viable.\n # make_dot(tuple(self.network(torch.rand((1, self.num_input_channels,\n # *self.configuration_manager.patch_size),\n # device=self.device)))).render(\n # join(self.output_folder, \"network_architecture.pdf\"), format='pdf')\n # self.optimizer.zero_grad()\n\n # broken.\n\n import hiddenlayer as hl\n g = hl.build_graph(self.network,\n torch.rand((1, self.num_input_channels,\n *self.configuration_manager.patch_size),\n device=self.device),\n transforms=None)\n 
g.save(join(self.output_folder, \"network_architecture.pdf\"))\n del g\n except Exception as e:\n self.print_to_log_file(\"Unable to plot network architecture:\")\n self.print_to_log_file(e)\n\n # self.print_to_log_file(\"\\nprinting the network instead:\\n\")\n # self.print_to_log_file(self.network)\n # self.print_to_log_file(\"\\n\")\n finally:\n empty_cache(self.device)\n\n def do_split(self):\n \"\"\"\n The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,\n so always the same) and save it as splits_final.pkl file in the preprocessed data directory.\n Sometimes you may want to create your own split for various reasons. For this you will need to create your own\n splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in\n it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)\n and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to\n use a random 80:20 data split.\n :return:\n \"\"\"\n if self.fold == \"all\":\n # if fold==all then we use all images for training and validation\n case_identifiers = get_case_identifiers(self.preprocessed_dataset_folder)\n tr_keys = case_identifiers\n val_keys = tr_keys\n else:\n splits_file = join(self.preprocessed_dataset_folder_base, \"splits_final.json\")\n dataset = nnUNetDataset(self.preprocessed_dataset_folder, case_identifiers=None,\n num_images_properties_loading_threshold=0,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage)\n # if the split file does not exist we need to create it\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new 5-fold cross-validation split...\")\n splits = []\n all_keys_sorted = np.sort(list(dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append({})\n splits[-1]['train'] = list(train_keys)\n splits[-1]['val'] = list(test_keys)\n save_json(splits, splits_file)\n\n else:\n self.print_to_log_file(\"Using splits from existing split file:\", splits_file)\n splits = load_json(splits_file)\n self.print_to_log_file(f\"The split file contains {len(splits)} splits.\")\n\n self.print_to_log_file(\"Desired fold for training: %d\" % self.fold)\n if self.fold < len(splits):\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n self.print_to_log_file(\"This split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n else:\n self.print_to_log_file(\"INFO: You requested fold %d for training but splits \"\n \"contain only %d folds. 
I am now creating a \"\n \"random (but seeded) 80:20 split!\" % (self.fold, len(splits)))\n # if we request a fold that is not in the split file, create a random 80:20 split\n rnd = np.random.RandomState(seed=12345 + self.fold)\n keys = np.sort(list(dataset.keys()))\n idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)\n idx_val = [i for i in range(len(keys)) if i not in idx_tr]\n tr_keys = [keys[i] for i in idx_tr]\n val_keys = [keys[i] for i in idx_val]\n self.print_to_log_file(\"This random 80:20 split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n if any([i in val_keys for i in tr_keys]):\n self.print_to_log_file('WARNING: Some validation cases are also in the training set. Please check the '\n 'splits.json or ignore if this is intentional.')\n return tr_keys, val_keys\n\n def get_tr_and_val_datasets(self):\n # create dataset split\n tr_keys, val_keys = self.do_split()\n\n # load the datasets for training and validation. Note that we always draw random samples so we really don't\n # care about distributing training cases across GPUs.\n dataset_tr = nnUNetDataset(self.preprocessed_dataset_folder, tr_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n return dataset_tr, dataset_val\n\n def get_dataloaders(self):\n # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether\n # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n\n # needed for deep supervision: how much do we need to downscale the segmentation targets for the different\n # outputs?\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \\\n self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()\n\n # training pipeline\n tr_transforms = self.get_training_transforms(\n patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug,\n order_resampling_data=3, order_resampling_seg=1,\n use_mask_for_norm=self.configuration_manager.use_mask_for_norm,\n is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n # validation pipeline\n val_transforms = self.get_validation_transforms(deep_supervision_scales,\n is_cascaded=self.is_cascaded,\n foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if\n self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim)\n\n allowed_num_processes = get_allowed_n_proc_DA()\n if allowed_num_processes == 0:\n mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)\n mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)\n else:\n mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, data_loader=dl_tr, transform=tr_transforms,\n num_processes=allowed_num_processes, num_cached=6, seeds=None,\n pin_memory=self.device.type == 'cuda', 
wait_time=0.02)\n mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, data_loader=dl_val,\n transform=val_transforms, num_processes=max(1, allowed_num_processes // 2),\n num_cached=3, seeds=None, pin_memory=self.device.type == 'cuda',\n wait_time=0.02)\n return mt_gen_train, mt_gen_val\n\n def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int):\n dataset_tr, dataset_val = self.get_tr_and_val_datasets()\n\n if dim == 2:\n dl_tr = nnUNetDataLoader2D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader2D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n else:\n dl_tr = nnUNetDataLoader3D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader3D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n return dl_tr, dl_val\n\n @staticmethod\n def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],\n rotation_for_DA: dict,\n deep_supervision_scales: Union[List, Tuple],\n mirror_axes: Tuple[int, ...],\n do_dummy_2d_data_aug: bool,\n order_resampling_data: int = 3,\n order_resampling_seg: int = 1,\n border_val_seg: int = -1,\n use_mask_for_norm: List[bool] = None,\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n tr_transforms = []\n if do_dummy_2d_data_aug:\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),\n do_rotation=True, angle_x=rotation_for_DA['x'], angle_y=rotation_for_DA['y'], angle_z=rotation_for_DA['z'],\n p_rot_per_axis=1, # todo experiment with this\n do_scale=True, scale=(0.7, 1.4),\n border_mode_data=\"constant\", border_cval_data=0, order_data=order_resampling_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg, order_seg=order_resampling_seg,\n random_crop=False, # random cropping is part of our dataloaders\n p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,\n independent_scale_for_each_axis=False # todo experiment with this\n ))\n\n if do_dummy_2d_data_aug:\n tr_transforms.append(Convert2DTo3DTransform())\n\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,\n p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n 
tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))\n tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))\n\n if mirror_axes is not None and len(mirror_axes) > 0:\n tr_transforms.append(MirrorTransform(mirror_axes))\n\n if use_mask_for_norm is not None and any(use_mask_for_norm):\n tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]],\n mask_idx_in_seg=0, set_outside_to=0))\n\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n assert foreground_labels is not None, 'We need foreground_labels for cascade augmentations'\n tr_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n tr_transforms.append(ApplyRandomBinaryOperatorTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n p_per_sample=0.4,\n key=\"data\",\n strel_size=(1, 8),\n p_per_label=1))\n tr_transforms.append(\n RemoveRandomConnectedComponentFromOneHotEncodingTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n key=\"data\",\n p_per_sample=0.2,\n fill_with_other_class_p=0,\n dont_do_if_covers_more_than_x_percent=0.15))\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n tr_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n return tr_transforms\n\n @staticmethod\n def get_validation_transforms(deep_supervision_scales: Union[List, Tuple],\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n val_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n val_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n return val_transforms\n\n def set_deep_supervision_enabled(self, enabled: bool):\n \"\"\"\n This function is specific for the default architecture in nnU-Net. 
If you change the architecture, there are\n chances you need to change this as well!\n \"\"\"\n if self.is_ddp:\n self.network.module.decoder.deep_supervision = enabled\n else:\n self.network.decoder.deep_supervision = enabled\n\n def on_train_start(self):\n if not self.was_initialized:\n self.initialize()\n\n maybe_mkdir_p(self.output_folder)\n\n # make sure deep supervision is on in the network\n self.set_deep_supervision_enabled(True)\n\n self.print_plans()\n empty_cache(self.device)\n\n # maybe unpack\n if self.unpack_dataset and self.local_rank == 0:\n self.print_to_log_file('unpacking dataset...')\n unpack_dataset(self.preprocessed_dataset_folder, unpack_segmentation=True, overwrite_existing=False,\n num_processes=max(1, round(get_allowed_n_proc_DA() // 2)))\n self.print_to_log_file('unpacking done...')\n\n if self.is_ddp:\n dist.barrier()\n\n # dataloaders must be instantiated here because they need access to the training data which may not be present\n # when doing inference\n self.dataloader_train, self.dataloader_val = self.get_dataloaders()\n\n # copy plans and dataset.json so that they can be used for restoring everything we need for inference\n save_json(self.plans_manager.plans, join(self.output_folder_base, 'plans.json'), sort_keys=False)\n save_json(self.dataset_json, join(self.output_folder_base, 'dataset.json'), sort_keys=False)\n\n # we don't really need the fingerprint but its still handy to have it with the others\n shutil.copy(join(self.preprocessed_dataset_folder_base, 'dataset_fingerprint.json'),\n join(self.output_folder_base, 'dataset_fingerprint.json'))\n\n # produces a pdf in output folder\n self.plot_network_architecture()\n\n self._save_debug_information()\n\n # print(f\"batch size: {self.batch_size}\")\n # print(f\"oversample: {self.oversample_foreground_percent}\")\n\n def on_train_end(self):\n # dirty hack because on_epoch_end increments the epoch counter and this is executed afterwards.\n # This will lead to the wrong current epoch to be stored\n self.current_epoch -= 1\n self.save_checkpoint(join(self.output_folder, \"checkpoint_final.pth\"))\n self.current_epoch += 1\n\n # now we can delete latest\n if self.local_rank == 0 and isfile(join(self.output_folder, \"checkpoint_latest.pth\")):\n os.remove(join(self.output_folder, \"checkpoint_latest.pth\"))\n\n # shut down dataloaders\n old_stdout = sys.stdout\n with open(os.devnull, 'w') as f:\n sys.stdout = f\n if self.dataloader_train is not None:\n self.dataloader_train._finish()\n if self.dataloader_val is not None:\n self.dataloader_val._finish()\n sys.stdout = old_stdout\n\n empty_cache(self.device)\n self.print_to_log_file(\"Training done.\")\n\n def on_train_epoch_start(self):\n self.network.train()\n self.lr_scheduler.step(self.current_epoch)\n self.print_to_log_file('')\n self.print_to_log_file(f'Epoch {self.current_epoch}')\n self.print_to_log_file(\n f\"Current learning rate: {np.round(self.optimizer.param_groups[0]['lr'], decimals=5)}\")\n # lrs are the same for all workers so we don't need to gather them in case of DDP training\n self.logger.log('lrs', self.optimizer.param_groups[0]['lr'], self.current_epoch)\n\n def train_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad(set_to_none=True)\n # Autocast is a little bitch.\n # If 
the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n # del data\n l = self.loss(output, target)\n\n if self.grad_scaler is not None:\n self.grad_scaler.scale(l).backward()\n self.grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n else:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n return {'loss': l.detach().cpu().numpy()}\n\n def on_train_epoch_end(self, train_outputs: List[dict]):\n outputs = collate_outputs(train_outputs)\n\n if self.is_ddp:\n losses_tr = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(losses_tr, outputs['loss'])\n loss_here = np.vstack(losses_tr).mean()\n else:\n loss_here = np.mean(outputs['loss'])\n\n self.logger.log('train_losses', loss_here, self.current_epoch)\n\n def on_validation_epoch_start(self):\n self.network.eval()\n\n def validation_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n # Autocast is a little bitch.\n # If the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n # we only need the output with the highest output resolution\n output = output[0]\n target = target[0]\n\n # the following is needed for online evaluation. Fake dice (green line)\n axes = [0] + list(range(2, output.ndim))\n\n if self.label_manager.has_regions:\n predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()\n else:\n # no need for softmax\n output_seg = output.argmax(1)[:, None]\n predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)\n predicted_segmentation_onehot.scatter_(1, output_seg, 1)\n del output_seg\n\n if self.label_manager.has_ignore_label:\n if not self.label_manager.has_regions:\n mask = (target != self.label_manager.ignore_label).float()\n # CAREFUL that you don't rely on target after this line!\n target[target == self.label_manager.ignore_label] = 0\n else:\n mask = 1 - target[:, -1:]\n # CAREFUL that you don't rely on target after this line!\n target = target[:, :-1]\n else:\n mask = None\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)\n\n tp_hard = tp.detach().cpu().numpy()\n fp_hard = fp.detach().cpu().numpy()\n fn_hard = fn.detach().cpu().numpy()\n if not self.label_manager.has_regions:\n # if we train with regions all segmentation heads predict some kind of foreground. 
In conventional\n # (softmax training) there needs tobe one output for the background. We are not interested in the\n # background Dice\n # [1:] in order to remove background\n tp_hard = tp_hard[1:]\n fp_hard = fp_hard[1:]\n fn_hard = fn_hard[1:]\n\n return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}\n\n def on_validation_epoch_end(self, val_outputs: List[dict]):\n outputs_collated = collate_outputs(val_outputs)\n tp = np.sum(outputs_collated['tp_hard'], 0)\n fp = np.sum(outputs_collated['fp_hard'], 0)\n fn = np.sum(outputs_collated['fn_hard'], 0)\n\n if self.is_ddp:\n world_size = dist.get_world_size()\n\n tps = [None for _ in range(world_size)]\n dist.all_gather_object(tps, tp)\n tp = np.vstack([i[None] for i in tps]).sum(0)\n\n fps = [None for _ in range(world_size)]\n dist.all_gather_object(fps, fp)\n fp = np.vstack([i[None] for i in fps]).sum(0)\n\n fns = [None for _ in range(world_size)]\n dist.all_gather_object(fns, fn)\n fn = np.vstack([i[None] for i in fns]).sum(0)\n\n losses_val = [None for _ in range(world_size)]\n dist.all_gather_object(losses_val, outputs_collated['loss'])\n loss_here = np.vstack(losses_val).mean()\n else:\n loss_here = np.mean(outputs_collated['loss'])\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(tp, fp, fn)]]\n mean_fg_dice = np.nanmean(global_dc_per_class)\n self.logger.log('mean_fg_dice', mean_fg_dice, self.current_epoch)\n self.logger.log('dice_per_class_or_region', global_dc_per_class, self.current_epoch)\n self.logger.log('val_losses', loss_here, self.current_epoch)\n\n def on_epoch_start(self):\n self.logger.log('epoch_start_timestamps', time(), self.current_epoch)\n\n def on_epoch_end(self):\n self.logger.log('epoch_end_timestamps', time(), self.current_epoch)\n\n # todo find a solution for this stupid shit\n self.print_to_log_file('train_loss', np.round(self.logger.my_fantastic_logging['train_losses'][-1], decimals=4))\n self.print_to_log_file('val_loss', np.round(self.logger.my_fantastic_logging['val_losses'][-1], decimals=4))\n self.print_to_log_file('Pseudo dice', [np.round(i, decimals=4) for i in\n self.logger.my_fantastic_logging['dice_per_class_or_region'][-1]])\n self.print_to_log_file(\n f\"Epoch time: {np.round(self.logger.my_fantastic_logging['epoch_end_timestamps'][-1] - self.logger.my_fantastic_logging['epoch_start_timestamps'][-1], decimals=2)} s\")\n\n # handling periodic checkpointing\n current_epoch = self.current_epoch\n if (current_epoch + 1) % self.save_every == 0 and current_epoch != (self.num_epochs - 1):\n self.save_checkpoint(join(self.output_folder, 'checkpoint_latest.pth'))\n\n # handle 'best' checkpointing. ema_fg_dice is computed by the logger and can be accessed like this\n if self._best_ema is None or self.logger.my_fantastic_logging['ema_fg_dice'][-1] > self._best_ema:\n self._best_ema = self.logger.my_fantastic_logging['ema_fg_dice'][-1]\n self.print_to_log_file(f\"Yayy! 
New best EMA pseudo Dice: {np.round(self._best_ema, decimals=4)}\")\n self.save_checkpoint(join(self.output_folder, 'checkpoint_best.pth'))\n\n if self.local_rank == 0:\n self.logger.plot_progress_png(self.output_folder)\n\n self.current_epoch += 1\n\n def save_checkpoint(self, filename: str) -> None:\n if self.local_rank == 0:\n if not self.disable_checkpointing:\n if self.is_ddp:\n mod = self.network.module\n else:\n mod = self.network\n if isinstance(mod, OptimizedModule):\n mod = mod._orig_mod\n\n checkpoint = {\n 'network_weights': mod.state_dict(),\n 'optimizer_state': self.optimizer.state_dict(),\n 'grad_scaler_state': self.grad_scaler.state_dict() if self.grad_scaler is not None else None,\n 'logging': self.logger.get_checkpoint(),\n '_best_ema': self._best_ema,\n 'current_epoch': self.current_epoch + 1,\n 'init_args': self.my_init_kwargs,\n 'trainer_name': self.__class__.__name__,\n 'inference_allowed_mirroring_axes': self.inference_allowed_mirroring_axes,\n }\n torch.save(checkpoint, filename)\n else:\n self.print_to_log_file('No checkpoint written, checkpointing is disabled')\n\n def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) -> None:\n if not self.was_initialized:\n self.initialize()\n\n if isinstance(filename_or_checkpoint, str):\n checkpoint = torch.load(filename_or_checkpoint, map_location=self.device)\n # if state dict comes from nn.DataParallel but we use non-parallel model here then the state dict keys do not\n # match. Use heuristic to make it match\n new_state_dict = {}\n for k, value in checkpoint['network_weights'].items():\n key = k\n if key not in self.network.state_dict().keys() and key.startswith('module.'):\n key = key[7:]\n new_state_dict[key] = value\n\n self.my_init_kwargs = checkpoint['init_args']\n self.current_epoch = checkpoint['current_epoch']\n self.logger.load_checkpoint(checkpoint['logging'])\n self._best_ema = checkpoint['_best_ema']\n self.inference_allowed_mirroring_axes = checkpoint[\n 'inference_allowed_mirroring_axes'] if 'inference_allowed_mirroring_axes' in checkpoint.keys() else self.inference_allowed_mirroring_axes\n\n # messing with state dict naming schemes. 
Facepalm.\n if self.is_ddp:\n if isinstance(self.network.module, OptimizedModule):\n self.network.module._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.module.load_state_dict(new_state_dict)\n else:\n if isinstance(self.network, OptimizedModule):\n self.network._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.load_state_dict(new_state_dict)\n self.optimizer.load_state_dict(checkpoint['optimizer_state'])\n if self.grad_scaler is not None:\n if checkpoint['grad_scaler_state'] is not None:\n self.grad_scaler.load_state_dict(checkpoint['grad_scaler_state'])\n\n def perform_actual_validation(self, save_probabilities: bool = False):\n self.set_deep_supervision_enabled(False)\n self.network.eval()\n\n predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True,\n perform_everything_on_gpu=True, device=self.device, verbose=False,\n verbose_preprocessing=False, allow_tqdm=False)\n predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None,\n self.dataset_json, self.__class__.__name__,\n self.inference_allowed_mirroring_axes)\n\n with multiprocessing.get_context(\"spawn\").Pool(default_num_processes) as segmentation_export_pool:\n worker_list = [i for i in segmentation_export_pool._pool]\n validation_output_folder = join(self.output_folder, 'validation')\n maybe_mkdir_p(validation_output_folder)\n\n # we cannot use self.get_tr_and_val_datasets() here because we might be DDP and then we have to distribute\n # the validation keys across the workers.\n _, val_keys = self.do_split()\n if self.is_ddp:\n val_keys = val_keys[self.local_rank:: dist.get_world_size()]\n\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n\n next_stages = self.configuration_manager.next_stage_names\n\n if next_stages is not None:\n _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages]\n\n results = []\n\n for k in dataset_val.keys():\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n while not proceed:\n sleep(0.1)\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n\n self.print_to_log_file(f\"predicting {k}\")\n data, seg, properties = dataset_val.load_case(k)\n\n if self.is_cascaded:\n data = np.vstack((data, convert_labelmap_to_one_hot(seg[-1], self.label_manager.foreground_labels,\n output_dtype=data.dtype)))\n with warnings.catch_warnings():\n # ignore 'The given NumPy array is not writable' warning\n warnings.simplefilter(\"ignore\")\n data = torch.from_numpy(data)\n\n output_filename_truncated = join(validation_output_folder, k)\n\n try:\n prediction = predictor.predict_sliding_window_return_logits(data)\n except RuntimeError:\n predictor.perform_everything_on_gpu = False\n prediction = predictor.predict_sliding_window_return_logits(data)\n predictor.perform_everything_on_gpu = True\n\n prediction = prediction.cpu()\n\n # this needs to go into background processes\n results.append(\n segmentation_export_pool.starmap_async(\n export_prediction_from_logits, (\n (prediction, properties, self.configuration_manager, self.plans_manager,\n self.dataset_json, output_filename_truncated, save_probabilities),\n )\n )\n )\n # for debug purposes\n # export_prediction(prediction_for_export, properties, 
self.configuration, self.plans, self.dataset_json,\n # output_filename_truncated, save_probabilities)\n\n # if needed, export the softmax prediction for the next stage\n if next_stages is not None:\n for n in next_stages:\n next_stage_config_manager = self.plans_manager.get_configuration(n)\n expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name,\n next_stage_config_manager.data_identifier)\n\n try:\n # we do this so that we can use load_case and do not have to hard code how loading training cases is implemented\n tmp = nnUNetDataset(expected_preprocessed_folder, [k],\n num_images_properties_loading_threshold=0)\n d, s, p = tmp.load_case(k)\n except FileNotFoundError:\n self.print_to_log_file(\n f\"Predicting next stage {n} failed for case {k} because the preprocessed file is missing! \"\n f\"Run the preprocessing for this configuration first!\")\n continue\n\n target_shape = d.shape[1:]\n output_folder = join(self.output_folder_base, 'predicted_next_stage', n)\n output_file = join(output_folder, k + '.npz')\n\n # resample_and_save(prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties,\n # self.dataset_json)\n results.append(segmentation_export_pool.starmap_async(\n resample_and_save, (\n (prediction, target_shape, output_file, self.plans_manager,\n self.configuration_manager,\n properties,\n self.dataset_json),\n )\n ))\n\n _ = [r.get() for r in results]\n\n if self.is_ddp:\n dist.barrier()\n\n if self.local_rank == 0:\n metrics = compute_metrics_on_folder(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'),\n validation_output_folder,\n join(validation_output_folder, 'summary.json'),\n self.plans_manager.image_reader_writer_class(),\n self.dataset_json[\"file_ending\"],\n self.label_manager.foreground_regions if self.label_manager.has_regions else\n self.label_manager.foreground_labels,\n self.label_manager.ignore_label, chill=True)\n self.print_to_log_file(\"Validation complete\", also_print_to_console=True)\n self.print_to_log_file(\"Mean Validation Dice: \", (metrics['foreground_mean'][\"Dice\"]), also_print_to_console=True)\n\n self.set_deep_supervision_enabled(True)\n compute_gaussian.cache_clear()\n\n def run_training(self):\n self.on_train_start()\n\n for epoch in range(self.current_epoch, self.num_epochs):\n self.on_epoch_start()\n\n self.on_train_epoch_start()\n train_outputs = []\n for batch_id in range(self.num_iterations_per_epoch):\n train_outputs.append(self.train_step(next(self.dataloader_train)))\n self.on_train_epoch_end(train_outputs)\n\n with torch.no_grad():\n self.on_validation_epoch_start()\n val_outputs = []\n for batch_id in range(self.num_val_iterations_per_epoch):\n val_outputs.append(self.validation_step(next(self.dataloader_val)))\n self.on_validation_epoch_end(val_outputs)\n\n self.on_epoch_end()\n\n self.on_train_end()" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = 
configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" } ]
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17667
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device.
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device.
with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
5
2023-12-04 19:43:14+00:00
24k
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/neuralhmm_tts.py
[ { "identifier": "Encoder", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class Encoder(nn.Module):\n r\"\"\"Neural HMM Encoder\n\n Same as Tacotron 2 encoder but increases the input length by states per phone\n\n Args:\n num_chars (int): Number of characters in the input.\n state_per_phone (int): Number of states per phone.\n in_out_channels (int): number of input and output channels.\n n_convolutions (int): number of convolutional layers.\n \"\"\"\n\n def __init__(self, num_chars, state_per_phone, in_out_channels=512, n_convolutions=3):\n super().__init__()\n\n self.state_per_phone = state_per_phone\n self.in_out_channels = in_out_channels\n\n self.emb = nn.Embedding(num_chars, in_out_channels)\n self.convolutions = nn.ModuleList()\n for _ in range(n_convolutions):\n self.convolutions.append(ConvBNBlock(in_out_channels, in_out_channels, 5, \"relu\"))\n self.lstm = nn.LSTM(\n in_out_channels,\n int(in_out_channels / 2) * state_per_phone,\n num_layers=1,\n batch_first=True,\n bias=True,\n bidirectional=True,\n )\n self.rnn_state = None\n\n def forward(self, x: torch.FloatTensor, x_len: torch.LongTensor) -> Tuple[torch.FloatTensor, torch.LongTensor]:\n \"\"\"Forward pass to the encoder.\n\n Args:\n x (torch.FloatTensor): input text indices.\n - shape: :math:`(b, T_{in})`\n x_len (torch.LongTensor): input text lengths.\n - shape: :math:`(b,)`\n\n Returns:\n Tuple[torch.FloatTensor, torch.LongTensor]: encoder outputs and output lengths.\n -shape: :math:`((b, T_{in} * states_per_phone, in_out_channels), (b,))`\n \"\"\"\n b, T = x.shape\n o = self.emb(x).transpose(1, 2)\n for layer in self.convolutions:\n o = layer(o)\n o = o.transpose(1, 2)\n o = nn.utils.rnn.pack_padded_sequence(o, x_len.cpu(), batch_first=True)\n self.lstm.flatten_parameters()\n o, _ = self.lstm(o)\n o, _ = nn.utils.rnn.pad_packed_sequence(o, batch_first=True)\n o = o.reshape(b, T * self.state_per_phone, self.in_out_channels)\n x_len = x_len * self.state_per_phone\n return o, x_len\n\n def inference(self, x, x_len):\n \"\"\"Inference to the encoder.\n\n Args:\n x (torch.FloatTensor): input text indices.\n - shape: :math:`(b, T_{in})`\n x_len (torch.LongTensor): input text lengths.\n - shape: :math:`(b,)`\n\n Returns:\n Tuple[torch.FloatTensor, torch.LongTensor]: encoder outputs and output lengths.\n -shape: :math:`((b, T_{in} * states_per_phone, in_out_channels), (b,))`\n \"\"\"\n b, T = x.shape\n o = self.emb(x).transpose(1, 2)\n for layer in self.convolutions:\n o = layer(o)\n o = o.transpose(1, 2)\n # self.lstm.flatten_parameters()\n o, _ = self.lstm(o)\n o = o.reshape(b, T * self.state_per_phone, self.in_out_channels)\n x_len = x_len * self.state_per_phone\n return o, x_len" }, { "identifier": "OverflowUtils", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class OverflowUtils:\n @staticmethod\n def get_data_parameters_for_flat_start(\n data_loader: torch.utils.data.DataLoader, out_channels: int, states_per_phone: int\n ):\n \"\"\"Generates data parameters for flat starting the HMM.\n\n Args:\n data_loader (torch.utils.data.Dataloader): _description_\n out_channels (int): mel spectrogram channels\n states_per_phone (_type_): HMM states per phone\n \"\"\"\n\n # State related information for transition_p\n total_state_len = 0\n total_mel_len = 0\n\n # Useful for data mean an std\n total_mel_sum = 0\n total_mel_sq_sum = 0\n\n for batch in tqdm(data_loader, leave=False):\n text_lengths = batch[\"token_id_lengths\"]\n mels = batch[\"mel\"]\n mel_lengths = batch[\"mel_lengths\"]\n\n 
total_state_len += torch.sum(text_lengths)\n total_mel_len += torch.sum(mel_lengths)\n total_mel_sum += torch.sum(mels)\n total_mel_sq_sum += torch.sum(torch.pow(mels, 2))\n\n data_mean = total_mel_sum / (total_mel_len * out_channels)\n data_std = torch.sqrt((total_mel_sq_sum / (total_mel_len * out_channels)) - torch.pow(data_mean, 2))\n average_num_states = total_state_len / len(data_loader.dataset)\n average_mel_len = total_mel_len / len(data_loader.dataset)\n average_duration_each_state = average_mel_len / average_num_states\n init_transition_prob = 1 / average_duration_each_state\n\n return data_mean, data_std, (init_transition_prob * states_per_phone)\n\n @staticmethod\n @torch.no_grad()\n def update_flat_start_transition(model, transition_p):\n model.neural_hmm.output_net.parametermodel.flat_start_output_layer(0.0, 1.0, transition_p)\n\n @staticmethod\n def log_clamped(x, eps=1e-04):\n \"\"\"\n Avoids the log(0) problem\n\n Args:\n x (torch.tensor): input tensor\n eps (float, optional): lower bound. Defaults to 1e-04.\n\n Returns:\n torch.tensor: :math:`log(x)`\n \"\"\"\n clamped_x = torch.clamp(x, min=eps)\n return torch.log(clamped_x)\n\n @staticmethod\n def inverse_sigmod(x):\n r\"\"\"\n Inverse of the sigmoid function\n \"\"\"\n if not torch.is_tensor(x):\n x = torch.tensor(x)\n return OverflowUtils.log_clamped(x / (1.0 - x))\n\n @staticmethod\n def inverse_softplus(x):\n r\"\"\"\n Inverse of the softplus function\n \"\"\"\n if not torch.is_tensor(x):\n x = torch.tensor(x)\n return OverflowUtils.log_clamped(torch.exp(x) - 1.0)\n\n @staticmethod\n def logsumexp(x, dim):\n r\"\"\"\n Differentiable LogSumExp: Does not creates nan gradients\n when all the inputs are -inf yeilds 0 gradients.\n Args:\n x : torch.Tensor - The input tensor\n dim: int - The dimension on which the log sum exp has to be applied\n \"\"\"\n\n m, _ = x.max(dim=dim)\n mask = m == -float(\"inf\")\n s = (x - m.masked_fill_(mask, 0).unsqueeze(dim=dim)).exp().sum(dim=dim)\n return s.masked_fill_(mask, 1).log() + m.masked_fill_(mask, -float(\"inf\"))\n\n @staticmethod\n def double_pad(list_of_different_shape_tensors):\n r\"\"\"\n Pads the list of tensors in 2 dimensions\n \"\"\"\n second_dim_lens = [len(a) for a in [i[0] for i in list_of_different_shape_tensors]]\n second_dim_max = max(second_dim_lens)\n padded_x = [F.pad(x, (0, second_dim_max - len(x[0]))) for x in list_of_different_shape_tensors]\n return nn.utils.rnn.pad_sequence(padded_x, batch_first=True)" }, { "identifier": "NeuralHMM", "path": "TTS/tts/layers/overflow/neural_hmm.py", "snippet": "class NeuralHMM(nn.Module):\n \"\"\"Autoregressive left to right HMM model primarily used in \"Neural HMMs are all you need (for high-quality attention-free TTS)\"\n\n Paper::\n https://arxiv.org/abs/2108.13320\n\n Paper abstract::\n Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using\n HMMs. However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase\n training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be\n combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right\n no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an\n HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without\n approximation. 
We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting\n example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst\n achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate.\n\n Args:\n frame_channels (int): Output dimension to generate.\n ar_order (int): Autoregressive order of the model. In ablations of Neural HMM it was found that more autoregression while giving more variation hurts naturalness of the synthesised audio.\n deterministic_transition (bool): deterministic duration generation based on duration quantiles as defiend in \"S. Ronanki, O. Watts, S. King, and G. E. Henter, “Medianbased generation of synthetic speech durations using a nonparametric approach,” in Proc. SLT, 2016.\". Defaults to True.\n encoder_dim (int): Channels of encoder input and character embedding tensors. Defaults to 512.\n prenet_type (str): `original` or `bn`. `original` sets the default Prenet and `bn` uses Batch Normalization version of the Prenet.\n prenet_dim (int): Dimension of the Prenet.\n prenet_n_layers (int): Number of layers in the Prenet.\n prenet_dropout (float): Dropout probability of the Prenet.\n prenet_dropout_at_inference (bool): If True, dropout is applied at inference time.\n memory_rnn_dim (int): Size of the memory RNN to process output of prenet.\n outputnet_size (List[int]): Size of the output network inside the neural HMM.\n flat_start_params (dict): Parameters for the flat start initialization of the neural HMM.\n std_floor (float): Floor value for the standard deviation of the neural HMM. Prevents model cheating by putting point mass and getting infinite likelihood at any datapoint.\n use_grad_checkpointing (bool, optional): Use gradient checkpointing to save memory. 
Defaults to True.\n \"\"\"\n\n def __init__(\n self,\n frame_channels: int,\n ar_order: int,\n deterministic_transition: bool,\n encoder_dim: int,\n prenet_type: str,\n prenet_dim: int,\n prenet_n_layers: int,\n prenet_dropout: float,\n prenet_dropout_at_inference: bool,\n memory_rnn_dim: int,\n outputnet_size: List[int],\n flat_start_params: dict,\n std_floor: float,\n use_grad_checkpointing: bool = True,\n ):\n super().__init__()\n\n self.frame_channels = frame_channels\n self.ar_order = ar_order\n self.deterministic_transition = deterministic_transition\n self.prenet_dim = prenet_dim\n self.memory_rnn_dim = memory_rnn_dim\n self.use_grad_checkpointing = use_grad_checkpointing\n\n self.transition_model = TransitionModel()\n self.emission_model = EmissionModel()\n\n assert ar_order > 0, f\"AR order must be greater than 0 provided {ar_order}\"\n\n self.ar_order = ar_order\n self.prenet = Prenet(\n in_features=frame_channels * ar_order,\n prenet_type=prenet_type,\n prenet_dropout=prenet_dropout,\n dropout_at_inference=prenet_dropout_at_inference,\n out_features=[self.prenet_dim for _ in range(prenet_n_layers)],\n bias=False,\n )\n self.memory_rnn = nn.LSTMCell(input_size=prenet_dim, hidden_size=memory_rnn_dim)\n self.output_net = Outputnet(\n encoder_dim, memory_rnn_dim, frame_channels, outputnet_size, flat_start_params, std_floor\n )\n self.register_buffer(\"go_tokens\", torch.zeros(ar_order, 1))\n\n def forward(self, inputs, inputs_len, mels, mel_lens):\n r\"\"\"HMM forward algorithm for training uses logarithmic version of Rabiner (1989) forward algorithm.\n\n Args:\n inputs (torch.FloatTensor): Encoder outputs\n inputs_len (torch.LongTensor): Encoder output lengths\n mels (torch.FloatTensor): Mel inputs\n mel_lens (torch.LongTensor): Length of mel inputs\n\n Shapes:\n - inputs: (B, T, D_out_enc)\n - inputs_len: (B)\n - mels: (B, D_mel, T_mel)\n - mel_lens: (B)\n\n Returns:\n log_prob (torch.FloatTensor): Log probability of the sequence\n \"\"\"\n # Get dimensions of inputs\n batch_size, N, _ = inputs.shape\n T_max = torch.max(mel_lens)\n mels = mels.permute(0, 2, 1)\n\n # Intialize forward algorithm\n log_state_priors = self._initialize_log_state_priors(inputs)\n log_c, log_alpha_scaled, transition_matrix, means = self._initialize_forward_algorithm_variables(mels, N)\n\n # Initialize autoregression elements\n ar_inputs = self._add_go_token(mels)\n h_memory, c_memory = self._init_lstm_states(batch_size, self.memory_rnn_dim, mels)\n\n for t in range(T_max):\n # Process Autoregression\n h_memory, c_memory = self._process_ar_timestep(t, ar_inputs, h_memory, c_memory)\n # Get mean, std and transition vector from decoder for this timestep\n # Note: Gradient checkpointing currently doesn't works with multiple gpus inside a loop\n if self.use_grad_checkpointing and self.training:\n mean, std, transition_vector = checkpoint(self.output_net, h_memory, inputs)\n else:\n mean, std, transition_vector = self.output_net(h_memory, inputs)\n\n if t == 0:\n log_alpha_temp = log_state_priors + self.emission_model(mels[:, 0], mean, std, inputs_len)\n else:\n log_alpha_temp = self.emission_model(mels[:, t], mean, std, inputs_len) + self.transition_model(\n log_alpha_scaled[:, t - 1, :], transition_vector, inputs_len\n )\n log_c[:, t] = torch.logsumexp(log_alpha_temp, dim=1)\n log_alpha_scaled[:, t, :] = log_alpha_temp - log_c[:, t].unsqueeze(1)\n transition_matrix[:, t] = transition_vector # needed for absorption state calculation\n\n # Save for plotting\n means.append(mean.detach())\n\n log_c, 
log_alpha_scaled = self._mask_lengths(mel_lens, log_c, log_alpha_scaled)\n\n sum_final_log_c = self.get_absorption_state_scaling_factor(\n mel_lens, log_alpha_scaled, inputs_len, transition_matrix\n )\n\n log_probs = torch.sum(log_c, dim=1) + sum_final_log_c\n\n return log_probs, log_alpha_scaled, transition_matrix, means\n\n @staticmethod\n def _mask_lengths(mel_lens, log_c, log_alpha_scaled):\n \"\"\"\n Mask the lengths of the forward variables so that the variable lenghts\n do not contribute in the loss calculation\n Args:\n mel_inputs (torch.FloatTensor): (batch, T, frame_channels)\n mel_inputs_lengths (torch.IntTensor): (batch)\n log_c (torch.FloatTensor): (batch, T)\n Returns:\n log_c (torch.FloatTensor) : scaled probabilities (batch, T)\n log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N)\n \"\"\"\n mask_log_c = sequence_mask(mel_lens)\n log_c = log_c * mask_log_c\n mask_log_alpha_scaled = mask_log_c.unsqueeze(2)\n log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled\n return log_c, log_alpha_scaled\n\n def _process_ar_timestep(\n self,\n t,\n ar_inputs,\n h_memory,\n c_memory,\n ):\n \"\"\"\n Process autoregression in timestep\n 1. At a specific t timestep\n 2. Perform data dropout if applied (we did not use it)\n 3. Run the autoregressive frame through the prenet (has dropout)\n 4. Run the prenet output through the post prenet rnn\n\n Args:\n t (int): mel-spec timestep\n ar_inputs (torch.FloatTensor): go-token appended mel-spectrograms\n - shape: (b, D_out, T_out)\n h_post_prenet (torch.FloatTensor): previous timestep rnn hidden state\n - shape: (b, memory_rnn_dim)\n c_post_prenet (torch.FloatTensor): previous timestep rnn cell state\n - shape: (b, memory_rnn_dim)\n\n Returns:\n h_post_prenet (torch.FloatTensor): rnn hidden state of the current timestep\n c_post_prenet (torch.FloatTensor): rnn cell state of the current timestep\n \"\"\"\n prenet_input = ar_inputs[:, t : t + self.ar_order].flatten(1)\n memory_inputs = self.prenet(prenet_input)\n h_memory, c_memory = self.memory_rnn(memory_inputs, (h_memory, c_memory))\n return h_memory, c_memory\n\n def _add_go_token(self, mel_inputs):\n \"\"\"Append the go token to create the autoregressive input\n Args:\n mel_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)\n Returns:\n ar_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)\n \"\"\"\n batch_size, T, _ = mel_inputs.shape\n go_tokens = self.go_tokens.unsqueeze(0).expand(batch_size, self.ar_order, self.frame_channels)\n ar_inputs = torch.cat((go_tokens, mel_inputs), dim=1)[:, :T]\n return ar_inputs\n\n @staticmethod\n def _initialize_forward_algorithm_variables(mel_inputs, N):\n r\"\"\"Initialize placeholders for forward algorithm variables, to use a stable\n version we will use log_alpha_scaled and the scaling constant\n\n Args:\n mel_inputs (torch.FloatTensor): (b, T_max, frame_channels)\n N (int): number of states\n Returns:\n log_c (torch.FloatTensor): Scaling constant (b, T_max)\n \"\"\"\n b, T_max, _ = mel_inputs.shape\n log_alpha_scaled = mel_inputs.new_zeros((b, T_max, N))\n log_c = mel_inputs.new_zeros(b, T_max)\n transition_matrix = mel_inputs.new_zeros((b, T_max, N))\n\n # Saving for plotting later, will not have gradient tapes\n means = []\n return log_c, log_alpha_scaled, transition_matrix, means\n\n @staticmethod\n def _init_lstm_states(batch_size, hidden_state_dim, device_tensor):\n r\"\"\"\n Initialize Hidden and Cell states for LSTM Cell\n\n Args:\n batch_size (Int): batch size\n hidden_state_dim (Int): dimensions of 
the h and c\n device_tensor (torch.FloatTensor): useful for the device and type\n\n Returns:\n (torch.FloatTensor): shape (batch_size, hidden_state_dim)\n can be hidden state for LSTM\n (torch.FloatTensor): shape (batch_size, hidden_state_dim)\n can be the cell state for LSTM\n \"\"\"\n return (\n device_tensor.new_zeros(batch_size, hidden_state_dim),\n device_tensor.new_zeros(batch_size, hidden_state_dim),\n )\n\n def get_absorption_state_scaling_factor(self, mels_len, log_alpha_scaled, inputs_len, transition_vector):\n \"\"\"Returns the final scaling factor of absorption state\n\n Args:\n mels_len (torch.IntTensor): Input size of mels to\n get the last timestep of log_alpha_scaled\n log_alpha_scaled (torch.FloatTEnsor): State probabilities\n text_lengths (torch.IntTensor): length of the states to\n mask the values of states lengths\n (\n Useful when the batch has very different lengths,\n when the length of an observation is less than\n the number of max states, then the log alpha after\n the state value is filled with -infs. So we mask\n those values so that it only consider the states\n which are needed for that length\n )\n transition_vector (torch.FloatTensor): transtiion vector for each state per timestep\n\n Shapes:\n - mels_len: (batch_size)\n - log_alpha_scaled: (batch_size, N, T)\n - text_lengths: (batch_size)\n - transition_vector: (batch_size, N, T)\n\n Returns:\n sum_final_log_c (torch.FloatTensor): (batch_size)\n\n \"\"\"\n N = torch.max(inputs_len)\n max_inputs_len = log_alpha_scaled.shape[2]\n state_lengths_mask = sequence_mask(inputs_len, max_len=max_inputs_len)\n\n last_log_alpha_scaled_index = (\n (mels_len - 1).unsqueeze(-1).expand(-1, N).unsqueeze(1)\n ) # Batch X Hidden State Size\n last_log_alpha_scaled = torch.gather(log_alpha_scaled, 1, last_log_alpha_scaled_index).squeeze(1)\n last_log_alpha_scaled = last_log_alpha_scaled.masked_fill(~state_lengths_mask, -float(\"inf\"))\n\n last_transition_vector = torch.gather(transition_vector, 1, last_log_alpha_scaled_index).squeeze(1)\n last_transition_probability = torch.sigmoid(last_transition_vector)\n log_probability_of_transitioning = OverflowUtils.log_clamped(last_transition_probability)\n\n last_transition_probability_index = self.get_mask_for_last_item(inputs_len, inputs_len.device)\n log_probability_of_transitioning = log_probability_of_transitioning.masked_fill(\n ~last_transition_probability_index, -float(\"inf\")\n )\n final_log_c = last_log_alpha_scaled + log_probability_of_transitioning\n\n # If the length of the mel is less than the number of states it will select the -inf values leading to nan gradients\n # Ideally, we should clean the dataset otherwise this is a little hack uncomment the line below\n final_log_c = final_log_c.clamp(min=torch.finfo(final_log_c.dtype).min)\n\n sum_final_log_c = torch.logsumexp(final_log_c, dim=1)\n return sum_final_log_c\n\n @staticmethod\n def get_mask_for_last_item(lengths, device, out_tensor=None):\n \"\"\"Returns n-1 mask for the last item in the sequence.\n\n Args:\n lengths (torch.IntTensor): lengths in a batch\n device (str, optional): Defaults to \"cpu\".\n out_tensor (torch.Tensor, optional): uses the memory of a specific tensor.\n Defaults to None.\n\n Returns:\n - Shape: :math:`(b, max_len)`\n \"\"\"\n max_len = torch.max(lengths).item()\n ids = (\n torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor)\n )\n mask = ids == lengths.unsqueeze(1) - 1\n return mask\n\n @torch.inference_mode()\n def inference(\n 
self,\n inputs: torch.FloatTensor,\n input_lens: torch.LongTensor,\n sampling_temp: float,\n max_sampling_time: int,\n duration_threshold: float,\n ):\n \"\"\"Inference from autoregressive neural HMM\n\n Args:\n inputs (torch.FloatTensor): input states\n - shape: :math:`(b, T, d)`\n input_lens (torch.LongTensor): input state lengths\n - shape: :math:`(b)`\n sampling_temp (float): sampling temperature\n max_sampling_temp (int): max sampling temperature\n duration_threshold (float): duration threshold to switch to next state\n - Use this to change the spearking rate of the synthesised audio\n \"\"\"\n\n b = inputs.shape[0]\n outputs = {\n \"hmm_outputs\": [],\n \"hmm_outputs_len\": [],\n \"alignments\": [],\n \"input_parameters\": [],\n \"output_parameters\": [],\n }\n for i in range(b):\n neural_hmm_outputs, states_travelled, input_parameters, output_parameters = self.sample(\n inputs[i : i + 1], input_lens[i], sampling_temp, max_sampling_time, duration_threshold\n )\n\n outputs[\"hmm_outputs\"].append(neural_hmm_outputs)\n outputs[\"hmm_outputs_len\"].append(neural_hmm_outputs.shape[0])\n outputs[\"alignments\"].append(states_travelled)\n outputs[\"input_parameters\"].append(input_parameters)\n outputs[\"output_parameters\"].append(output_parameters)\n\n outputs[\"hmm_outputs\"] = nn.utils.rnn.pad_sequence(outputs[\"hmm_outputs\"], batch_first=True)\n outputs[\"hmm_outputs_len\"] = torch.tensor(\n outputs[\"hmm_outputs_len\"], dtype=input_lens.dtype, device=input_lens.device\n )\n return outputs\n\n @torch.inference_mode()\n def sample(self, inputs, input_lens, sampling_temp, max_sampling_time, duration_threshold):\n \"\"\"Samples an output from the parameter models\n\n Args:\n inputs (torch.FloatTensor): input states\n - shape: :math:`(1, T, d)`\n input_lens (torch.LongTensor): input state lengths\n - shape: :math:`(1)`\n sampling_temp (float): sampling temperature\n max_sampling_time (int): max sampling time\n duration_threshold (float): duration threshold to switch to next state\n\n Returns:\n outputs (torch.FloatTensor): Output Observations\n - Shape: :math:`(T, output_dim)`\n states_travelled (list[int]): Hidden states travelled\n - Shape: :math:`(T)`\n input_parameters (list[torch.FloatTensor]): Input parameters\n output_parameters (list[torch.FloatTensor]): Output parameters\n \"\"\"\n states_travelled, outputs, t = [], [], 0\n\n # Sample initial state\n current_state = 0\n states_travelled.append(current_state)\n\n # Prepare autoregression\n prenet_input = self.go_tokens.unsqueeze(0).expand(1, self.ar_order, self.frame_channels)\n h_memory, c_memory = self._init_lstm_states(1, self.memory_rnn_dim, prenet_input)\n\n input_parameter_values = []\n output_parameter_values = []\n quantile = 1\n while True:\n memory_input = self.prenet(prenet_input.flatten(1).unsqueeze(0))\n # will be 1 while sampling\n h_memory, c_memory = self.memory_rnn(memory_input.squeeze(0), (h_memory, c_memory))\n\n z_t = inputs[:, current_state].unsqueeze(0) # Add fake time dimension\n mean, std, transition_vector = self.output_net(h_memory, z_t)\n\n transition_probability = torch.sigmoid(transition_vector.flatten())\n staying_probability = torch.sigmoid(-transition_vector.flatten())\n\n # Save for plotting\n input_parameter_values.append([prenet_input, current_state])\n output_parameter_values.append([mean, std, transition_probability])\n\n x_t = self.emission_model.sample(mean, std, sampling_temp=sampling_temp)\n\n # Prepare autoregressive input for next iteration\n prenet_input = torch.cat((prenet_input, x_t), 
dim=1)[:, 1:]\n\n outputs.append(x_t.flatten())\n\n transition_matrix = torch.cat((staying_probability, transition_probability))\n quantile *= staying_probability\n if not self.deterministic_transition:\n switch = transition_matrix.multinomial(1)[0].item()\n else:\n switch = quantile < duration_threshold\n\n if switch:\n current_state += 1\n quantile = 1\n\n states_travelled.append(current_state)\n\n if (current_state == input_lens) or (max_sampling_time and t == max_sampling_time - 1):\n break\n\n t += 1\n\n return (\n torch.stack(outputs, dim=0),\n F.one_hot(input_lens.new_tensor(states_travelled)),\n input_parameter_values,\n output_parameter_values,\n )\n\n @staticmethod\n def _initialize_log_state_priors(text_embeddings):\n \"\"\"Creates the log pi in forward algorithm.\n\n Args:\n text_embeddings (torch.FloatTensor): used to create the log pi\n on current device\n\n Shapes:\n - text_embeddings: (B, T, D_out_enc)\n \"\"\"\n N = text_embeddings.shape[1]\n log_state_priors = text_embeddings.new_full([N], -float(\"inf\"))\n log_state_priors[0] = 0.0\n return log_state_priors" }, { "identifier": "get_spec_from_most_probable_state", "path": "TTS/tts/layers/overflow/plotting_utils.py", "snippet": "def get_spec_from_most_probable_state(log_alpha_scaled, means, decoder=None):\n \"\"\"Get the most probable state means from the log_alpha_scaled.\n\n Args:\n log_alpha_scaled (torch.Tensor): Log alpha scaled values.\n - Shape: :math:`(T, N)`\n means (torch.Tensor): Means of the states.\n - Shape: :math:`(N, T, D_out)`\n decoder (torch.nn.Module): Decoder module to decode the latent to melspectrogram. Defaults to None.\n \"\"\"\n max_state_numbers = torch.max(log_alpha_scaled, dim=1)[1]\n max_len = means.shape[0]\n n_mel_channels = means.shape[2]\n max_state_numbers = max_state_numbers.unsqueeze(1).unsqueeze(1).expand(max_len, 1, n_mel_channels)\n means = torch.gather(means, 1, max_state_numbers).squeeze(1).to(log_alpha_scaled.dtype)\n if decoder is not None:\n mel = (\n decoder(means.T.unsqueeze(0), torch.tensor([means.shape[0]], device=means.device), reverse=True)[0]\n .squeeze(0)\n .T\n )\n else:\n mel = means\n return mel" }, { "identifier": "plot_transition_probabilities_to_numpy", "path": "TTS/tts/layers/overflow/plotting_utils.py", "snippet": "def plot_transition_probabilities_to_numpy(states, transition_probabilities, output_fig=False):\n \"\"\"Generates trainsition probabilities plot for the states and the probability of transition.\n\n Args:\n states (torch.IntTensor): the states\n transition_probabilities (torch.FloatTensor): the transition probabilities\n \"\"\"\n states = validate_numpy_array(states)\n transition_probabilities = validate_numpy_array(transition_probabilities)\n\n fig, ax = plt.subplots(figsize=(30, 3))\n ax.plot(transition_probabilities, \"o\")\n ax.set_title(\"Transition probability of state\")\n ax.set_xlabel(\"hidden state\")\n ax.set_ylabel(\"probability\")\n ax.set_xticks([i for i in range(len(transition_probabilities))]) # pylint: disable=unnecessary-comprehension\n ax.set_xticklabels([int(x) for x in states], rotation=90)\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "BaseTTS", "path": "TTS/tts/models/base_tts.py", "snippet": "class BaseTTS(BaseTrainerModel):\n \"\"\"Base `tts` class. 
Every new `tts` model must inherit this.\n\n It defines common `tts` specific functions on top of `Model` implementation.\n \"\"\"\n\n MODEL_TYPE = \"tts\"\n\n def __init__(\n self,\n config: Coqpit,\n ap: \"AudioProcessor\",\n tokenizer: \"TTSTokenizer\",\n speaker_manager: SpeakerManager = None,\n language_manager: LanguageManager = None,\n ):\n super().__init__()\n self.config = config\n self.ap = ap\n self.tokenizer = tokenizer\n self.speaker_manager = speaker_manager\n self.language_manager = language_manager\n self._set_model_args(config)\n\n def _set_model_args(self, config: Coqpit):\n \"\"\"Setup model args based on the config type (`ModelConfig` or `ModelArgs`).\n\n `ModelArgs` has all the fields reuqired to initialize the model architecture.\n\n `ModelConfig` has all the fields required for training, inference and containes `ModelArgs`.\n\n If the config is for training with a name like \"*Config\", then the model args are embeded in the\n config.model_args\n\n If the config is for the model with a name like \"*Args\", then we assign the directly.\n \"\"\"\n # don't use isintance not to import recursively\n if \"Config\" in config.__class__.__name__:\n config_num_chars = (\n self.config.model_args.num_chars if hasattr(self.config, \"model_args\") else self.config.num_chars\n )\n num_chars = config_num_chars if self.tokenizer is None else self.tokenizer.characters.num_chars\n if \"characters\" in config:\n self.config.num_chars = num_chars\n if hasattr(self.config, \"model_args\"):\n config.model_args.num_chars = num_chars\n self.args = self.config.model_args\n else:\n self.config = config\n self.args = config.model_args\n elif \"Args\" in config.__class__.__name__:\n self.args = config\n else:\n raise ValueError(\"config must be either a *Config or *Args\")\n\n def init_multispeaker(self, config: Coqpit, data: List = None):\n \"\"\"Initialize a speaker embedding layer if needen and define expected embedding channel size for defining\n `in_channels` size of the connected layers.\n\n This implementation yields 3 possible outcomes:\n\n 1. If `config.use_speaker_embedding` and `config.use_d_vector_file are False, do nothing.\n 2. If `config.use_d_vector_file` is True, set expected embedding channel size to `config.d_vector_dim` or 512.\n 3. 
If `config.use_speaker_embedding`, initialize a speaker embedding layer with channel size of\n `config.d_vector_dim` or 512.\n\n You can override this function for new models.\n\n Args:\n config (Coqpit): Model configuration.\n \"\"\"\n # set number of speakers\n if self.speaker_manager is not None:\n self.num_speakers = self.speaker_manager.num_speakers\n elif hasattr(config, \"num_speakers\"):\n self.num_speakers = config.num_speakers\n\n # set ultimate speaker embedding size\n if config.use_speaker_embedding or config.use_d_vector_file:\n self.embedded_speaker_dim = (\n config.d_vector_dim if \"d_vector_dim\" in config and config.d_vector_dim is not None else 512\n )\n # init speaker embedding layer\n if config.use_speaker_embedding and not config.use_d_vector_file:\n print(\" > Init speaker_embedding layer.\")\n self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim)\n self.speaker_embedding.weight.data.normal_(0, 0.3)\n\n def get_aux_input(self, **kwargs) -> Dict:\n \"\"\"Prepare and return `aux_input` used by `forward()`\"\"\"\n return {\"speaker_id\": None, \"style_wav\": None, \"d_vector\": None, \"language_id\": None}\n\n def get_aux_input_from_test_sentences(self, sentence_info):\n if hasattr(self.config, \"model_args\"):\n config = self.config.model_args\n else:\n config = self.config\n\n # extract speaker and language info\n text, speaker_name, style_wav, language_name = None, None, None, None\n\n if isinstance(sentence_info, list):\n if len(sentence_info) == 1:\n text = sentence_info[0]\n elif len(sentence_info) == 2:\n text, speaker_name = sentence_info\n elif len(sentence_info) == 3:\n text, speaker_name, style_wav = sentence_info\n elif len(sentence_info) == 4:\n text, speaker_name, style_wav, language_name = sentence_info\n else:\n text = sentence_info\n\n # get speaker id/d_vector\n speaker_id, d_vector, language_id = None, None, None\n if self.speaker_manager is not None:\n if config.use_d_vector_file:\n if speaker_name is None:\n d_vector = self.speaker_manager.get_random_embedding()\n else:\n d_vector = self.speaker_manager.get_d_vector_by_name(speaker_name)\n elif config.use_speaker_embedding:\n if speaker_name is None:\n speaker_id = self.speaker_manager.get_random_id()\n else:\n speaker_id = self.speaker_manager.name_to_id[speaker_name]\n\n # get language id\n if self.language_manager is not None and config.use_language_embedding and language_name is not None:\n language_id = self.language_manager.name_to_id[language_name]\n\n return {\n \"text\": text,\n \"speaker_id\": speaker_id,\n \"style_wav\": style_wav,\n \"d_vector\": d_vector,\n \"language_id\": language_id,\n }\n\n def format_batch(self, batch: Dict) -> Dict:\n \"\"\"Generic batch formatting for `TTSDataset`.\n\n You must override this if you use a custom dataset.\n\n Args:\n batch (Dict): [description]\n\n Returns:\n Dict: [description]\n \"\"\"\n # setup input batch\n text_input = batch[\"token_id\"]\n text_lengths = batch[\"token_id_lengths\"]\n speaker_names = batch[\"speaker_names\"]\n linear_input = batch[\"linear\"]\n mel_input = batch[\"mel\"]\n mel_lengths = batch[\"mel_lengths\"]\n stop_targets = batch[\"stop_targets\"]\n item_idx = batch[\"item_idxs\"]\n d_vectors = batch[\"d_vectors\"]\n speaker_ids = batch[\"speaker_ids\"]\n attn_mask = batch[\"attns\"]\n waveform = batch[\"waveform\"]\n pitch = batch[\"pitch\"]\n energy = batch[\"energy\"]\n language_ids = batch[\"language_ids\"]\n max_text_length = torch.max(text_lengths.float())\n max_spec_length = 
torch.max(mel_lengths.float())\n\n # compute durations from attention masks\n durations = None\n if attn_mask is not None:\n durations = torch.zeros(attn_mask.shape[0], attn_mask.shape[2])\n for idx, am in enumerate(attn_mask):\n # compute raw durations\n c_idxs = am[:, : text_lengths[idx], : mel_lengths[idx]].max(1)[1]\n # c_idxs, counts = torch.unique_consecutive(c_idxs, return_counts=True)\n c_idxs, counts = torch.unique(c_idxs, return_counts=True)\n dur = torch.ones([text_lengths[idx]]).to(counts.dtype)\n dur[c_idxs] = counts\n # smooth the durations and set any 0 duration to 1\n # by cutting off from the largest duration indeces.\n extra_frames = dur.sum() - mel_lengths[idx]\n largest_idxs = torch.argsort(-dur)[:extra_frames]\n dur[largest_idxs] -= 1\n assert (\n dur.sum() == mel_lengths[idx]\n ), f\" [!] total duration {dur.sum()} vs spectrogram length {mel_lengths[idx]}\"\n durations[idx, : text_lengths[idx]] = dur\n\n # set stop targets wrt reduction factor\n stop_targets = stop_targets.view(text_input.shape[0], stop_targets.size(1) // self.config.r, -1)\n stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze(2)\n stop_target_lengths = torch.divide(mel_lengths, self.config.r).ceil_()\n\n return {\n \"text_input\": text_input,\n \"text_lengths\": text_lengths,\n \"speaker_names\": speaker_names,\n \"mel_input\": mel_input,\n \"mel_lengths\": mel_lengths,\n \"linear_input\": linear_input,\n \"stop_targets\": stop_targets,\n \"stop_target_lengths\": stop_target_lengths,\n \"attn_mask\": attn_mask,\n \"durations\": durations,\n \"speaker_ids\": speaker_ids,\n \"d_vectors\": d_vectors,\n \"max_text_length\": float(max_text_length),\n \"max_spec_length\": float(max_spec_length),\n \"item_idx\": item_idx,\n \"waveform\": waveform,\n \"pitch\": pitch,\n \"energy\": energy,\n \"language_ids\": language_ids,\n \"audio_unique_names\": batch[\"audio_unique_names\"],\n }\n\n def get_sampler(self, config: Coqpit, dataset: TTSDataset, num_gpus=1):\n weights = None\n data_items = dataset.samples\n\n if getattr(config, \"use_language_weighted_sampler\", False):\n alpha = getattr(config, \"language_weighted_sampler_alpha\", 1.0)\n print(\" > Using Language weighted sampler with alpha:\", alpha)\n weights = get_language_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_speaker_weighted_sampler\", False):\n alpha = getattr(config, \"speaker_weighted_sampler_alpha\", 1.0)\n print(\" > Using Speaker weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_speaker_balancer_weights(data_items) * alpha\n else:\n weights = get_speaker_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_length_weighted_sampler\", False):\n alpha = getattr(config, \"length_weighted_sampler_alpha\", 1.0)\n print(\" > Using Length weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_length_balancer_weights(data_items) * alpha\n else:\n weights = get_length_balancer_weights(data_items) * alpha\n\n if weights is not None:\n sampler = WeightedRandomSampler(weights, len(weights))\n else:\n sampler = None\n\n # sampler for DDP\n if sampler is None:\n sampler = DistributedSampler(dataset) if num_gpus > 1 else None\n else: # If a sampler is already defined use this sampler and DDP sampler together\n sampler = DistributedSamplerWrapper(sampler) if num_gpus > 1 else sampler\n\n return sampler\n\n def get_data_loader(\n self,\n config: Coqpit,\n assets: Dict,\n is_eval: bool,\n samples: Union[List[Dict], List[List]],\n verbose: bool,\n 
num_gpus: int,\n rank: int = None,\n ) -> \"DataLoader\":\n if is_eval and not config.run_eval:\n loader = None\n else:\n # setup multi-speaker attributes\n if self.speaker_manager is not None:\n if hasattr(config, \"model_args\"):\n speaker_id_mapping = (\n self.speaker_manager.name_to_id if config.model_args.use_speaker_embedding else None\n )\n d_vector_mapping = self.speaker_manager.embeddings if config.model_args.use_d_vector_file else None\n config.use_d_vector_file = config.model_args.use_d_vector_file\n else:\n speaker_id_mapping = self.speaker_manager.name_to_id if config.use_speaker_embedding else None\n d_vector_mapping = self.speaker_manager.embeddings if config.use_d_vector_file else None\n else:\n speaker_id_mapping = None\n d_vector_mapping = None\n\n # setup multi-lingual attributes\n if self.language_manager is not None:\n language_id_mapping = self.language_manager.name_to_id if self.args.use_language_embedding else None\n else:\n language_id_mapping = None\n\n # init dataloader\n dataset = TTSDataset(\n outputs_per_step=config.r if \"r\" in config else 1,\n compute_linear_spec=config.model.lower() == \"tacotron\" or config.compute_linear_spec,\n compute_f0=config.get(\"compute_f0\", False),\n f0_cache_path=config.get(\"f0_cache_path\", None),\n compute_energy=config.get(\"compute_energy\", False),\n energy_cache_path=config.get(\"energy_cache_path\", None),\n samples=samples,\n ap=self.ap,\n return_wav=config.return_wav if \"return_wav\" in config else False,\n batch_group_size=0 if is_eval else config.batch_group_size * config.batch_size,\n min_text_len=config.min_text_len,\n max_text_len=config.max_text_len,\n min_audio_len=config.min_audio_len,\n max_audio_len=config.max_audio_len,\n phoneme_cache_path=config.phoneme_cache_path,\n precompute_num_workers=config.precompute_num_workers,\n use_noise_augment=False if is_eval else config.use_noise_augment,\n verbose=verbose,\n speaker_id_mapping=speaker_id_mapping,\n d_vector_mapping=d_vector_mapping if config.use_d_vector_file else None,\n tokenizer=self.tokenizer,\n start_by_longest=config.start_by_longest,\n language_id_mapping=language_id_mapping,\n )\n\n # wait all the DDP process to be ready\n if num_gpus > 1:\n dist.barrier()\n\n # sort input sequences from short to long\n dataset.preprocess_samples()\n\n # get samplers\n sampler = self.get_sampler(config, dataset, num_gpus)\n\n loader = DataLoader(\n dataset,\n batch_size=config.eval_batch_size if is_eval else config.batch_size,\n shuffle=config.shuffle if sampler is None else False, # if there is no other sampler\n collate_fn=dataset.collate_fn,\n drop_last=config.drop_last, # setting this False might cause issues in AMP training.\n sampler=sampler,\n num_workers=config.num_eval_loader_workers if is_eval else config.num_loader_workers,\n pin_memory=False,\n )\n return loader\n\n def _get_test_aux_input(\n self,\n ) -> Dict:\n d_vector = None\n if self.config.use_d_vector_file:\n d_vector = [self.speaker_manager.embeddings[name][\"embedding\"] for name in self.speaker_manager.embeddings]\n d_vector = (random.sample(sorted(d_vector), 1),)\n\n aux_inputs = {\n \"speaker_id\": None\n if not self.config.use_speaker_embedding\n else random.sample(sorted(self.speaker_manager.name_to_id.values()), 1),\n \"d_vector\": d_vector,\n \"style_wav\": None, # TODO: handle GST style input\n }\n return aux_inputs\n\n def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:\n \"\"\"Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different 
behaviour.\n\n Args:\n assets (dict): A dict of training assets. For `tts` models, it must include `{'audio_processor': ap}`.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n \"\"\"\n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n aux_inputs = self._get_test_aux_input()\n for idx, sen in enumerate(test_sentences):\n if isinstance(sen, list):\n aux_inputs = self.get_aux_input_from_test_sentences(sen)\n sen = aux_inputs[\"text\"]\n outputs_dict = synthesis(\n self,\n sen,\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n use_griffin_lim=True,\n do_trim_silence=False,\n )\n test_audios[\"{}-audio\".format(idx)] = outputs_dict[\"wav\"]\n test_figures[\"{}-prediction\".format(idx)] = plot_spectrogram(\n outputs_dict[\"outputs\"][\"model_outputs\"], self.ap, output_fig=False\n )\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(\n outputs_dict[\"outputs\"][\"alignments\"], output_fig=False\n )\n return test_figures, test_audios\n\n def on_init_start(self, trainer):\n \"\"\"Save the speaker.pth and language_ids.json at the beginning of the training. Also update both paths.\"\"\"\n if self.speaker_manager is not None:\n output_path = os.path.join(trainer.output_path, \"speakers.pth\")\n self.speaker_manager.save_ids_to_file(output_path)\n trainer.config.speakers_file = output_path\n # some models don't have `model_args` set\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.speakers_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `speakers.pth` is saved to {output_path}.\")\n print(\" > `speakers_file` is updated in the config.json.\")\n\n if self.language_manager is not None:\n output_path = os.path.join(trainer.output_path, \"language_ids.json\")\n self.language_manager.save_ids_to_file(output_path)\n trainer.config.language_ids_file = output_path\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.language_ids_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `language_ids.json` is saved to {output_path}.\")\n print(\" > `language_ids_file` is updated in the config.json.\")" }, { "identifier": "SpeakerManager", "path": "TTS/tts/utils/speakers.py", "snippet": "class SpeakerManager(EmbeddingManager):\n \"\"\"Manage the speakers for multi-speaker 🐸TTS models. Load a datafile and parse the information\n in a way that can be queried by speaker or clip.\n\n There are 3 different scenarios considered:\n\n 1. Models using speaker embedding layers. The datafile only maps speaker names to ids used by the embedding layer.\n 2. Models using d-vectors. The datafile includes a dictionary in the following format.\n\n ::\n\n {\n 'clip_name.wav':{\n 'name': 'speakerA',\n 'embedding'[<d_vector_values>]\n },\n ...\n }\n\n\n 3. Computing the d-vectors by the speaker encoder. It loads the speaker encoder model and\n computes the d-vectors for a given clip or speaker.\n\n Args:\n d_vectors_file_path (str, optional): Path to the metafile including x vectors. Defaults to \"\".\n speaker_id_file_path (str, optional): Path to the metafile that maps speaker names to ids used by\n TTS models. 
Defaults to \"\".\n encoder_model_path (str, optional): Path to the speaker encoder model file. Defaults to \"\".\n encoder_config_path (str, optional): Path to the spealer encoder config file. Defaults to \"\".\n\n Examples:\n >>> # load audio processor and speaker encoder\n >>> ap = AudioProcessor(**config.audio)\n >>> manager = SpeakerManager(encoder_model_path=encoder_model_path, encoder_config_path=encoder_config_path)\n >>> # load a sample audio and compute embedding\n >>> waveform = ap.load_wav(sample_wav_path)\n >>> mel = ap.melspectrogram(waveform)\n >>> d_vector = manager.compute_embeddings(mel.T)\n \"\"\"\n\n def __init__(\n self,\n data_items: List[List[Any]] = None,\n d_vectors_file_path: str = \"\",\n speaker_id_file_path: str = \"\",\n encoder_model_path: str = \"\",\n encoder_config_path: str = \"\",\n use_cuda: bool = False,\n ):\n super().__init__(\n embedding_file_path=d_vectors_file_path,\n id_file_path=speaker_id_file_path,\n encoder_model_path=encoder_model_path,\n encoder_config_path=encoder_config_path,\n use_cuda=use_cuda,\n )\n\n if data_items:\n self.set_ids_from_data(data_items, parse_key=\"speaker_name\")\n\n @property\n def num_speakers(self):\n return len(self.name_to_id)\n\n @property\n def speaker_names(self):\n return list(self.name_to_id.keys())\n\n def get_speakers(self) -> List:\n return self.name_to_id\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", samples: Union[List[List], List[Dict]] = None) -> \"SpeakerManager\":\n \"\"\"Initialize a speaker manager from config\n\n Args:\n config (Coqpit): Config object.\n samples (Union[List[List], List[Dict]], optional): List of data samples to parse out the speaker names.\n Defaults to None.\n\n Returns:\n SpeakerEncoder: Speaker encoder object.\n \"\"\"\n speaker_manager = None\n if get_from_config_or_model_args_with_default(config, \"use_speaker_embedding\", False):\n if samples:\n speaker_manager = SpeakerManager(data_items=samples)\n if get_from_config_or_model_args_with_default(config, \"speaker_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speaker_file\", None)\n )\n if get_from_config_or_model_args_with_default(config, \"speakers_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speakers_file\", None)\n )\n\n if get_from_config_or_model_args_with_default(config, \"use_d_vector_file\", False):\n speaker_manager = SpeakerManager()\n if get_from_config_or_model_args_with_default(config, \"d_vector_file\", None):\n speaker_manager = SpeakerManager(\n d_vectors_file_path=get_from_config_or_model_args_with_default(config, \"d_vector_file\", None)\n )\n return speaker_manager" }, { "identifier": "TTSTokenizer", "path": "TTS/tts/utils/text/tokenizer.py", "snippet": "class TTSTokenizer:\n \"\"\"🐸TTS tokenizer to convert input characters to token IDs and back.\n\n Token IDs for OOV chars are discarded but those are stored in `self.not_found_characters` for later.\n\n Args:\n use_phonemes (bool):\n Whether to use phonemes instead of characters. Defaults to False.\n\n characters (Characters):\n A Characters object to use for character-to-ID and ID-to-character mappings.\n\n text_cleaner (callable):\n A function to pre-process the text before tokenization and phonemization. Defaults to None.\n\n phonemizer (Phonemizer):\n A phonemizer object or a dict that maps language codes to phonemizer objects. 
Defaults to None.\n\n Example:\n\n >>> from TTS.tts.utils.text.tokenizer import TTSTokenizer\n >>> tokenizer = TTSTokenizer(use_phonemes=False, characters=Graphemes())\n >>> text = \"Hello world!\"\n >>> ids = tokenizer.text_to_ids(text)\n >>> text_hat = tokenizer.ids_to_text(ids)\n >>> assert text == text_hat\n \"\"\"\n\n def __init__(\n self,\n use_phonemes=False,\n text_cleaner: Callable = None,\n characters: \"BaseCharacters\" = None,\n phonemizer: Union[\"Phonemizer\", Dict] = None,\n add_blank: bool = False,\n use_eos_bos=False,\n ):\n self.text_cleaner = text_cleaner\n self.use_phonemes = use_phonemes\n self.add_blank = add_blank\n self.use_eos_bos = use_eos_bos\n self.characters = characters\n self.not_found_characters = []\n self.phonemizer = phonemizer\n\n @property\n def characters(self):\n return self._characters\n\n @characters.setter\n def characters(self, new_characters):\n self._characters = new_characters\n self.pad_id = self.characters.char_to_id(self.characters.pad) if self.characters.pad else None\n self.blank_id = self.characters.char_to_id(self.characters.blank) if self.characters.blank else None\n\n def encode(self, text: str) -> List[int]:\n \"\"\"Encodes a string of text as a sequence of IDs.\"\"\"\n token_ids = []\n for char in text:\n try:\n idx = self.characters.char_to_id(char)\n token_ids.append(idx)\n except KeyError:\n # discard but store not found characters\n if char not in self.not_found_characters:\n self.not_found_characters.append(char)\n print(text)\n print(f\" [!] Character {repr(char)} not found in the vocabulary. Discarding it.\")\n return token_ids\n\n def decode(self, token_ids: List[int]) -> str:\n \"\"\"Decodes a sequence of IDs to a string of text.\"\"\"\n text = \"\"\n for token_id in token_ids:\n text += self.characters.id_to_char(token_id)\n return text\n\n def text_to_ids(self, text: str, language: str = None) -> List[int]: # pylint: disable=unused-argument\n \"\"\"Converts a string of text to a sequence of token IDs.\n\n Args:\n text(str):\n The text to convert to token IDs.\n\n language(str):\n The language code of the text. Defaults to None.\n\n TODO:\n - Add support for language-specific processing.\n\n 1. Text normalizatin\n 2. Phonemization (if use_phonemes is True)\n 3. Add blank char between characters\n 4. Add BOS and EOS characters\n 5. 
Text to token IDs\n \"\"\"\n # TODO: text cleaner should pick the right routine based on the language\n if self.text_cleaner is not None:\n text = self.text_cleaner(text)\n if self.use_phonemes:\n text = self.phonemizer.phonemize(text, separator=\"\", language=language)\n text = self.encode(text)\n if self.add_blank:\n text = self.intersperse_blank_char(text, True)\n if self.use_eos_bos:\n text = self.pad_with_bos_eos(text)\n return text\n\n def ids_to_text(self, id_sequence: List[int]) -> str:\n \"\"\"Converts a sequence of token IDs to a string of text.\"\"\"\n return self.decode(id_sequence)\n\n def pad_with_bos_eos(self, char_sequence: List[str]):\n \"\"\"Pads a sequence with the special BOS and EOS characters.\"\"\"\n return [self.characters.bos_id] + list(char_sequence) + [self.characters.eos_id]\n\n def intersperse_blank_char(self, char_sequence: List[str], use_blank_char: bool = False):\n \"\"\"Intersperses the blank character between characters in a sequence.\n\n Use the ```blank``` character if defined else use the ```pad``` character.\n \"\"\"\n char_to_use = self.characters.blank_id if use_blank_char else self.characters.pad\n result = [char_to_use] * (len(char_sequence) * 2 + 1)\n result[1::2] = char_sequence\n return result\n\n def print_logs(self, level: int = 0):\n indent = \"\\t\" * level\n print(f\"{indent}| > add_blank: {self.add_blank}\")\n print(f\"{indent}| > use_eos_bos: {self.use_eos_bos}\")\n print(f\"{indent}| > use_phonemes: {self.use_phonemes}\")\n if self.use_phonemes:\n print(f\"{indent}| > phonemizer:\")\n self.phonemizer.print_logs(level + 1)\n if len(self.not_found_characters) > 0:\n print(f\"{indent}| > {len(self.not_found_characters)} not found characters:\")\n for char in self.not_found_characters:\n print(f\"{indent}| > {char}\")\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", characters: \"BaseCharacters\" = None):\n \"\"\"Init Tokenizer object from config\n\n Args:\n config (Coqpit): Coqpit model config.\n characters (BaseCharacters): Defines the model character set. If not set, use the default options based on\n the config values. 
Defaults to None.\n \"\"\"\n # init cleaners\n text_cleaner = None\n if isinstance(config.text_cleaner, (str, list)):\n text_cleaner = getattr(cleaners, config.text_cleaner)\n\n # init characters\n if characters is None:\n # set characters based on defined characters class\n if config.characters and config.characters.characters_class:\n CharactersClass = import_class(config.characters.characters_class)\n characters, new_config = CharactersClass.init_from_config(config)\n # set characters based on config\n else:\n if config.use_phonemes:\n # init phoneme set\n characters, new_config = IPAPhonemes().init_from_config(config)\n else:\n # init character set\n characters, new_config = Graphemes().init_from_config(config)\n\n else:\n characters, new_config = characters.init_from_config(config)\n\n # set characters class\n new_config.characters.characters_class = get_import_path(characters)\n\n # init phonemizer\n phonemizer = None\n if config.use_phonemes:\n if \"phonemizer\" in config and config.phonemizer == \"multi_phonemizer\":\n lang_to_phonemizer_name = {}\n for dataset in config.datasets:\n if dataset.language != \"\":\n lang_to_phonemizer_name[dataset.language] = dataset.phonemizer\n else:\n raise ValueError(\"Multi phonemizer requires language to be set for each dataset.\")\n phonemizer = MultiPhonemizer(lang_to_phonemizer_name)\n else:\n phonemizer_kwargs = {\"language\": config.phoneme_language}\n if \"phonemizer\" in config and config.phonemizer:\n phonemizer = get_phonemizer_by_name(config.phonemizer, **phonemizer_kwargs)\n else:\n try:\n phonemizer = get_phonemizer_by_name(\n DEF_LANG_TO_PHONEMIZER[config.phoneme_language], **phonemizer_kwargs\n )\n new_config.phonemizer = phonemizer.name()\n except KeyError as e:\n raise ValueError(\n f\"\"\"No phonemizer found for language {config.phoneme_language}.\n You may need to install a third party library for this language.\"\"\"\n ) from e\n\n return (\n TTSTokenizer(\n config.use_phonemes, text_cleaner, characters, phonemizer, config.add_blank, config.enable_eos_bos_chars\n ),\n new_config,\n )" }, { "identifier": "plot_alignment", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_alignment(alignment, info=None, fig_size=(16, 10), title=None, output_fig=False, plot_log=False):\n if isinstance(alignment, torch.Tensor):\n alignment_ = alignment.detach().cpu().numpy().squeeze()\n else:\n alignment_ = alignment\n alignment_ = alignment_.astype(np.float32) if alignment_.dtype == np.float16 else alignment_\n fig, ax = plt.subplots(figsize=fig_size)\n im = ax.imshow(\n alignment_.T, aspect=\"auto\", origin=\"lower\", interpolation=\"none\", norm=LogNorm() if plot_log else None\n )\n fig.colorbar(im, ax=ax)\n xlabel = \"Decoder timestep\"\n if info is not None:\n xlabel += \"\\n\\n\" + info\n plt.xlabel(xlabel)\n plt.ylabel(\"Encoder timestep\")\n # plt.yticks(range(len(text)), list(text))\n plt.tight_layout()\n if title is not None:\n plt.title(title)\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "plot_spectrogram", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):\n if isinstance(spectrogram, torch.Tensor):\n spectrogram_ = spectrogram.detach().cpu().numpy().squeeze().T\n else:\n spectrogram_ = spectrogram.T\n spectrogram_ = spectrogram_.astype(np.float32) if spectrogram_.dtype == np.float16 else spectrogram_\n if ap is not None:\n spectrogram_ = ap.denormalize(spectrogram_) # pylint: disable=protected-access\n fig = 
plt.figure(figsize=fig_size)\n plt.imshow(spectrogram_, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "format_aux_input", "path": "TTS/utils/generic_utils.py", "snippet": "def format_aux_input(def_args: Dict, kwargs: Dict) -> Dict:\n \"\"\"Format kwargs to hande auxilary inputs to models.\n\n Args:\n def_args (Dict): A dictionary of argument names and their default values if not defined in `kwargs`.\n kwargs (Dict): A `dict` or `kwargs` that includes auxilary inputs to the model.\n\n Returns:\n Dict: arguments with formatted auxilary inputs.\n \"\"\"\n kwargs = kwargs.copy()\n for name in def_args:\n if name not in kwargs or kwargs[name] is None:\n kwargs[name] = def_args[name]\n return kwargs" }, { "identifier": "load_fsspec", "path": "TTS/utils/io.py", "snippet": "def load_fsspec(\n path: str,\n map_location: Union[str, Callable, torch.device, Dict[Union[str, torch.device], Union[str, torch.device]]] = None,\n cache: bool = True,\n **kwargs,\n) -> Any:\n \"\"\"Like torch.load but can load from other locations (e.g. s3:// , gs://).\n\n Args:\n path: Any path or url supported by fsspec.\n map_location: torch.device or str.\n cache: If True, cache a remote file locally for subsequent calls. It is cached under `get_user_data_dir()/tts_cache`. Defaults to True.\n **kwargs: Keyword arguments forwarded to torch.load.\n\n Returns:\n Object stored in path.\n \"\"\"\n is_local = os.path.isdir(path) or os.path.isfile(path)\n if cache and not is_local:\n with fsspec.open(\n f\"filecache::{path}\",\n filecache={\"cache_storage\": str(get_user_data_dir(\"tts_cache\"))},\n mode=\"rb\",\n ) as f:\n return torch.load(f, map_location=map_location, **kwargs)\n else:\n with fsspec.open(path, \"rb\") as f:\n return torch.load(f, map_location=map_location, **kwargs)" } ]
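A minimal usage sketch of the format_aux_input helper shown in the context above, to make its None-handling concrete; the default values here are illustrative only, not values taken from the library.

from TTS.utils.generic_utils import format_aux_input

# Defaults for fields the caller may omit; the concrete numbers are made up for illustration.
defaults = {"sampling_temp": 0.667, "max_sampling_time": 1000, "duration_threshold": 0.55}

# A key that is missing or explicitly None falls back to its default; explicit values are kept.
aux = format_aux_input(defaults, {"max_sampling_time": 500, "duration_threshold": None})
assert aux == {"sampling_temp": 0.667, "max_sampling_time": 500, "duration_threshold": 0.55}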
import os import torch from typing import Dict, List, Union from coqpit import Coqpit from torch import nn from trainer.logging.tensorboard_logger import TensorboardLogger from TTS.tts.layers.overflow.common_layers import Encoder, OverflowUtils from TTS.tts.layers.overflow.neural_hmm import NeuralHMM from TTS.tts.layers.overflow.plotting_utils import ( get_spec_from_most_probable_state, plot_transition_probabilities_to_numpy, ) from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.text.tokenizer import TTSTokenizer from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.generic_utils import format_aux_input from TTS.utils.io import load_fsspec from TTS.utils.audio import AudioProcessor
18,230
""" text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. """ default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. 
""" default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose)
class NeuralhmmTTS(BaseTTS): """Neural HMM TTS model. Paper:: https://arxiv.org/abs/2108.13320 Paper abstract:: Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using HMMs.However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate. Audio examples and code are available at https://shivammehta25.github.io/Neural-HMM/ . Note: - This is a parameter efficient version of OverFlow (15.3M vs 28.6M). Since it has half the number of parameters as OverFlow the synthesis output quality is suboptimal (but comparable to Tacotron2 without Postnet), but it learns to speak with even lesser amount of data and is still significantly faster than other attention-based methods. - Neural HMMs uses flat start initialization i.e it computes the means and std and transition probabilities of the dataset and uses them to initialize the model. This benefits the model and helps with faster learning If you change the dataset or want to regenerate the parameters change the `force_generate_statistics` and `mel_statistics_parameter_path` accordingly. - To enable multi-GPU training, set the `use_grad_checkpointing=False` in config. This will significantly increase the memory usage. This is because to compute the actual data likelihood (not an approximation using MAS/Viterbi) we must use all the states at the previous time step during the forward pass to decide the probability distribution at the current step i.e the difference between the forward algorithm and viterbi approximation. Check :class:`TTS.tts.configs.neuralhmm_tts_config.NeuralhmmTTSConfig` for class arguments. 
""" def __init__( self, config: "NeuralhmmTTSConfig", ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change self.config = config for key in config: setattr(self, key, config[key]) self.encoder = Encoder(config.num_chars, config.state_per_phone, config.encoder_in_out_features) self.neural_hmm = NeuralHMM( frame_channels=self.out_channels, ar_order=self.ar_order, deterministic_transition=self.deterministic_transition, encoder_dim=self.encoder_in_out_features, prenet_type=self.prenet_type, prenet_dim=self.prenet_dim, prenet_n_layers=self.prenet_n_layers, prenet_dropout=self.prenet_dropout, prenet_dropout_at_inference=self.prenet_dropout_at_inference, memory_rnn_dim=self.memory_rnn_dim, outputnet_size=self.outputnet_size, flat_start_params=self.flat_start_params, std_floor=self.std_floor, use_grad_checkpointing=self.use_grad_checkpointing, ) self.register_buffer("mean", torch.tensor(0)) self.register_buffer("std", torch.tensor(1)) def update_mean_std(self, statistics_dict: Dict): self.mean.data = torch.tensor(statistics_dict["mean"]) self.std.data = torch.tensor(statistics_dict["std"]) def preprocess_batch(self, text, text_len, mels, mel_len): if self.mean.item() == 0 or self.std.item() == 1: statistics_dict = torch.load(self.mel_statistics_parameter_path) self.update_mean_std(statistics_dict) mels = self.normalize(mels) return text, text_len, mels, mel_len def normalize(self, x): return x.sub(self.mean).div(self.std) def inverse_normalize(self, x): return x.mul(self.std).add(self.mean) def forward(self, text, text_len, mels, mel_len): """ Forward pass for training and computing the log likelihood of a given batch. Shapes: Shapes: text: :math:`[B, T_in]` text_len: :math:`[B]` mels: :math:`[B, T_out, C]` mel_len: :math:`[B]` """ text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. 
""" default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose)
tokenizer, new_config = TTSTokenizer.init_from_config(config)
7
2023-11-29 08:15:06+00:00
24k
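For orientation on the sample above: its cropped_code stops inside NeuralhmmTTS.init_from_config right after the AudioProcessor is built, and its next_line field supplies the tokenizer construction. A plausible completion, written as a standalone helper and assembled only from the snippets in this record's context (SpeakerManager.init_from_config, TTSTokenizer.init_from_config, the NeuralhmmTTS constructor); the repository's actual method body and the NeuralhmmTTS import path are assumptions and may differ.

from typing import Dict, List, Union

from coqpit import Coqpit

from TTS.tts.models.neuralhmm_tts import NeuralhmmTTS  # import path assumed, not shown in the record
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.utils.audio import AudioProcessor


def init_neuralhmm_from_config(config: Coqpit, samples: Union[List[List], List[Dict]] = None, verbose: bool = True) -> NeuralhmmTTS:
    """Hypothetical free-function equivalent of NeuralhmmTTS.init_from_config."""
    ap = AudioProcessor.init_from_config(config, verbose)
    tokenizer, new_config = TTSTokenizer.init_from_config(config)  # the record's gold next_line
    speaker_manager = SpeakerManager.init_from_config(config, samples=samples)
    return NeuralhmmTTS(new_config, ap, tokenizer, speaker_manager)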
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.controlnet import ControlNetModel
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
19,383
verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = 
latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
5
2023-11-21 08:33:54+00:00
24k
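The truncated `interpolate_latents` code above stops just inside its frame-interpolation loop, and the single completion line applies the function returned by `get_tensor_interpolation_method` to two neighbouring latent frames. The sketch below is a minimal, self-contained illustration of that step, assuming a plain linear blend (`lerp` is a hypothetical stand-in for the real `tensor_interpolation` configured in `magicanimate.utils.util`, which is not shown in this section) and illustrative tensor shapes.

import torch

def lerp(v0: torch.Tensor, v1: torch.Tensor, f: float) -> torch.Tensor:
    # Hypothetical stand-in for the method returned by get_tensor_interpolation_method().
    return (1.0 - f) * v0 + f * v1

# Latents follow the layout used by interpolate_latents: (batch, channels, frames, height, width).
latents = torch.randn(1, 4, 16, 64, 64)
interpolation_factor = 2
rate = [i / interpolation_factor for i in range(interpolation_factor)][1:]

frames = latents.shape[2]
new_latents = torch.zeros(
    (latents.shape[0], latents.shape[1], (frames - 1) * interpolation_factor + 1,
     latents.shape[3], latents.shape[4]),
    dtype=latents.dtype,
)

new_index = 0
for i0, i1 in zip(range(frames), range(frames)[1:]):
    v0, v1 = latents[:, :, i0], latents[:, :, i1]
    new_latents[:, :, new_index] = v0          # keep the original frame
    new_index += 1
    for f in rate:
        v = lerp(v0, v1, f)                    # the interpolated in-between frame
        new_latents[:, :, new_index] = v
        new_index += 1
new_latents[:, :, new_index] = latents[:, :, -1]   # close with the last original frame

Under these assumptions a 16-frame latent tensor becomes a 31-frame tensor, with every other frame a blend of its two neighbours, matching the behaviour of `interpolation_factor=2` in the sample above.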
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword arguments\n \n Args:\n kwargs (dict): Keyword arguments\n \"\"\"\n self.kwargs = kwargs\n self.__dict__.update(kwargs)\n\n def generate_list_settings(self, list_):\n \"\"\"\n Converts provided list to a normalized list that can be stored as a json object to serialize.\n \n Args:\n list_ (List): List to be converted\n Returns\n Transformed normal list\n \"\"\"\n normal_list = []\n for item in list_:\n if isinstance(item, BaseClass):\n normal_list.append(item.generate_settings())\n elif isinstance(item, dict):\n normal_list.append(self.generate_kwarg_setting(item))\n elif isinstance(item, (tuple, list)):\n normal_list.append(self.generate_list_settings(item))\n else:\n normal_list.append(item)\n return normal_list\n\n def generate_kwarg_setting(self, kwargs):\n \"\"\"\n Converts provided keyword arguments to normal kwargs in terms of serialization.\n\n Args:\n kwargs (dict): kwargs to be converted.\n \"\"\"\n normal_kwargs = dict()\n for kwarg in kwargs:\n if isinstance(kwargs[kwarg], BaseClass):\n normal_kwargs[kwarg] = kwargs[kwarg].generate_settings()\n elif isinstance(kwargs[kwarg], (list, tuple)):\n normal_kwargs[kwarg] = self.generate_list_settings(kwargs[kwarg])\n elif isinstance(kwargs[kwarg], dict):\n normal_kwargs[kwarg] = self.generate_kwarg_setting(kwargs[kwarg])\n else:\n normal_kwargs[kwarg] = kwargs[kwarg]\n \n return normal_kwargs\n\n\n def generate_settings(self):\n \"\"\"\n Generates settings for the instance of the BaseClass.\n\n Returns\n Settings in dictionary format.\n \"\"\"\n settings = {\n \"class\": self.__class__.__name__, \n **self.generate_kwarg_setting({kwarg: self.__dict__[kwarg] for kwarg in self.kwargs}), \n }\n return settings\n \n def save(self, path):\n \"\"\"\n Saves the generated settings into a JSON file at a specified path.\n \n Args:\n path (string): The file path at which the settings have to be saved.\n \"\"\"\n settings = self.generate_settings()\n\n if os.path.dirname(path) != \"\":\n os.makedirs(os.path.dirname(path), exist_ok=True)\n \n with open(path, \"w\") as f:\n json.dump(settings, f, indent=2)\n\n @classmethod\n def get_all_subclasses(cls):\n \"\"\"\n Returns all subclasses of the BaseClass.\n \"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass.get_all_subclasses())\n\n return all_subclasses\n\n @staticmethod\n def find_class(cls_name):\n \"\"\"\n Searches for a class that matches the given class name.\n\n Args:\n cls_name (string): Class name to be matched\n \"\"\"\n for possible_cls in BaseClass.get_all_subclasses():\n if possible_cls.__name__ == cls_name:\n return possible_cls\n return None\n\n @staticmethod\n def load_from_list_settings(list_):\n \"\"\"\n Deserializes the list saved settings to instantiate the objects.\n\n Args:\n list_ (List): List of saved settings\n \"\"\"\n output_list = []\n for item in list_:\n if isinstance(item, dict):\n output_list.append(BaseClass.load_from_dict(item))\n elif isinstance(item, (tuple, list)):\n output_list.append(BaseClass.load_from_list_settings(item))\n else:\n output_list.append(item)\n\n return output_list\n \n @staticmethod\n def load_from_dict(dict_):\n \"\"\"\n Deserializes the dictionary saved settings to instantiate the 
objects.\n\n Args:\n dict_ (dict): Dictionary containing saved settings\n \"\"\"\n other_class = BaseClass.find_class(dict_.get(\"class\", None))\n if other_class is not None:\n return other_class.load_from_settings(dict_)\n \n output_dict = dict()\n for key in dict_:\n if isinstance(dict_[key], dict):\n output_dict[key] = BaseClass.load_from_dict(dict_[key])\n elif isinstance(dict_[key], (tuple, list)):\n output_dict[key] = BaseClass.load_from_list_settings(dict_[key])\n else:\n output_dict[key] = dict_[key]\n\n return output_dict\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Deserializes the saved settings to instantiate the object.\n\n Args:\n settings (dict): Saved settings\n \"\"\"\n cls = BaseClass.find_class(settings[\"class\"])\n\n if cls is None:\n logger.error(f\"Could not find class {settings['class']} when loading class.\")\n return None\n\n kwargs = dict()\n for kwarg in settings:\n if kwarg == \"class\":\n continue\n if isinstance(settings[kwarg], dict):\n kwargs[kwarg] = BaseClass.load_from_dict(settings[kwarg])\n elif isinstance(settings[kwarg], (tuple, list)):\n kwargs[kwarg] = BaseClass.load_from_list_settings(settings[kwarg])\n else:\n kwargs[kwarg] = settings[kwarg]\n\n return cls(**kwargs)\n\n @classmethod\n def _load(cls, path, **kwargs):\n \"\"\"\n Loads the settings from the JSON file at the specified path.\n \n Args:\n path (string): The file path from which the settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n for kwarg in kwargs:\n settings[kwarg] = kwargs[kwarg]\n return cls.load_from_settings(settings)\n\n @staticmethod\n def load(path, **kwargs):\n \"\"\"\n Loads the settings of the class from the JSON file.\n\n Args:\n path (string): The file path from which the class settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n cls = BaseClass.find_class(settings[\"class\"])\n return cls._load(path, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the class object.\n \"\"\"\n return f\"{self.__class__.__name__}({self.kwargs})\"\n \n def __eq__(self, o: object) -> bool:\n \"\"\"\n Checks whether the provided object is equal to the current object.\n\n Args:\n o (object): Object to compare\n \"\"\"\n if not isinstance(o, BaseClass):\n return False\n \n other_settings = o.generate_settings()\n settings = self.generate_settings()\n\n return other_settings == settings" }, { "identifier": "CustomDataset", "path": "src/model_arithmetic/dataset.py", "snippet": "class CustomDataset(Dataset):\n \"\"\"\n A custom PyTorch Dataset class for tokenized sequence data.\n\n Uses a tokenizer to convert text data from a DataFrame to input_ids (tokens), \n and optionally attaches label data if present in the DataFrame.\n \"\"\"\n def __init__(self, tokenizer, df, max_tokens=128, min_tokens=1, random_cutoff=False):\n \"\"\"\n Initializes the CustomDataset object.\n\n Args:\n tokenizer (Tokenizer): The tokenizer to be used for the text data.\n df (pandas.DataFrame): DataFrame containing the text data, and optionally labels.\n max_tokens (int, optional): Maximum number of tokens per sequence. Defaults to 128.\n min_tokens (int, optional): Minimum number of tokens per sequence. Defaults to 1.\n random_cutoff (bool, optional): Whether to randomly cut off the number of tokens per sequence. 
Defaults to False.\n \"\"\"\n super().__init__()\n data = df.dropna()\n self.tokenized_dataset = [\n tokenizer(sentence, return_tensors=\"pt\", truncation=True, max_length=max_tokens).input_ids.view(-1) for sentence in tqdm(data[\"text\"].tolist())\n ]\n\n self.df = data\n self.has_labels = \"label\" in data.columns\n self.min_tokens = min_tokens\n self.labels = None\n if self.has_labels:\n self.labels = data[\"label\"].values\n \n self.random_cutoff = random_cutoff\n\n def __len__(self):\n \"\"\"\n Returns the length of the tokenized dataset, \n i.e., the number of tokenized sequences.\n \n Returns:\n int: Number of tokenized sequences.\n \"\"\"\n return len(self.tokenized_dataset)\n\n def __getitem__(self, idx):\n \"\"\"\n Fetches an item from the dataset at the given index.\n\n If labels are available, also fetches the associated label.\n If `random_cutoff` is true, may truncate sequence length randomly.\n\n Args:\n idx (int): Index of the required sequence.\n\n Returns:\n dict: A dictionary with the following structure-\n {\n \"input_ids\": torch.Tensor (Tokenized sequence),\n \"labels\": torch.Tensor (Associated label, if available)\n }\n \"\"\"\n cutoff = len(self.tokenized_dataset[idx])\n if self.random_cutoff:\n cutoff = torch.randint(min(cutoff, self.min_tokens), cutoff + 1, (1,)).item()\n \n if not self.has_labels:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff]}\n else:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff], \"labels\": torch.tensor([self.labels[idx]], dtype=torch.long)}" }, { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. 
\n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "ModelArithmetic", "path": "src/model_arithmetic/model_arithmetic.py", "snippet": "class ModelArithmetic(PreTrainedModel):\n \"\"\"\n Main class for prompt arithmetic. 
Handles the generation of text based on the formula.\n \"\"\"\n SAVE_FILE = \"prompt_arithmetic.json\"\n _supports_sdpa = True\n\n def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, \n retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):\n \"\"\"Initializes the prompt arithmetic model.\n\n Args:\n formula (Operator): The formula for which generations need to be made.\n default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.\n dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.\n intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.\n epsilon (float, optional): Just some small value. Defaults to 1e-12.\n retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].\n calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.\n needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.\n lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.\n \"\"\"\n self.formula = formula.clone()\n\n self.default_model = default_model\n self.loaded_models = dict()\n self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)\n self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn\n self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator\n \n self.output_type = namedtuple(\"ModelArithmeticOutput\", [\"logits\", \"logprobs_per_model\"])\n self.intermediate_argmax = intermediate_argmax\n self.retroactive_operators = retroactive_operators\n self.calculate_statistics = calculate_statistics\n\n self.runnable_operators = []\n for runnable_operator in self.formula.runnable_operators():\n if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):\n self.runnable_operators.append(runnable_operator)\n \n\n # sort the prompts by speculative factor, putting the one with highest speculative factor first\n # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones\n # however, we first need to sort by run_priority and then within that by speculative factor\n self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)\n \n self.load_all_models(dtype=dtype)\n if self.default_model not in self.loaded_models:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = runnable_operator.model\n break\n if self.default_model is None:\n raise ValueError(\"Default model must be specified if not specified in an llm prompt\")\n\n self.config = self.loaded_models[str(self.default_model)].config\n\n if 
tokenizer is None:\n self.tokenizer = load_tokenizer(self.default_model)\n else:\n self.tokenizer = tokenizer\n \n self.init_runnable_operators()\n \n self.model_input_tokens = {\n runnable_operator.id(): TokenizedInput(runnable_operator, \n runnable_operator.model, \n self.loaded_models[str(runnable_operator.model)].config,\n self.tokenizer) \n for runnable_operator in self.runnable_operators\n }\n \n self.init_monitor()\n \n self.epsilon = epsilon\n \n self.word_size = len(self.tokenizer)\n \n if Compatibility is not None:\n self.lm_eval_compatibility = Compatibility(\n task_name=lm_eval_task,\n needs_input_tokens_lm_eval=needs_input_tokens_lm_eval,\n tokenizer=self.tokenizer,\n device=self.device,\n max_length=get_max_length(self.config),\n )\n else:\n self.lm_eval_compatibility = None\n \n super().__init__(self.config)\n \n def init_monitor(self):\n \"\"\"\n Initializes the monitor for the prompt arithmetic model.\n \"\"\"\n self.monitor = Monitor(self.runnable_operators)\n \n def init_runnable_operators(self):\n \"\"\"Initializes the runnable operators. This is done after the models have been loaded, because the models are needed for the runnable operators.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n runnable_operator.model = self.default_model\n runnable_operator.initialize_after_model_set()\n\n def load_all_models(self, dtype=torch.bfloat16):\n \"\"\"Loads all the models that are needed for the runnable operators. Models are never loaded twice.\n\n Args:\n dtype (torch.dtype, optional): Default Dtype of the models. Defaults to torch.bfloat16.\n \"\"\"\n if self.default_model is None:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = str(runnable_operator.model)\n break\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n assert self.default_model is not None, \"Default model must be specified if not specified in prompt\"\n runnable_operator.model = self.default_model\n if runnable_operator.model not in self.loaded_models:\n model = runnable_operator.load_model(dtype=dtype)\n model.eval()\n if model is not None:\n self.loaded_models[str(runnable_operator.model)] = model\n \n if len(self.loaded_models) == 0:\n assert self.default_model is not None, \"Required to at least have one model, for now\"\n self.loaded_models[str(self.default_model)] = load_model(self.default_model, dtype=dtype)\n \n @property\n def device(self):\n \"\"\"Device of the default model. Needed for compatibility with lm_eval\n\n Returns:\n torch.device: Device of the default model.\n \"\"\"\n return self.loaded_models[str(self.default_model)].device\n\n def save_pretrained(self, path : str):\n \"\"\"Saves the model to the specified path.\n\n Args:\n path (str): Path to which to save the model\n \"\"\"\n os.makedirs(path, exist_ok=True)\n all_settings = {\n \"formula\": self.formula.generate_settings(),\n \"default_model\": self.default_model,\n }\n\n with open(os.path.join(path, self.SAVE_FILE), \"w\") as f:\n json.dump(all_settings, f, indent=4, sort_keys=True)\n\n @classmethod\n def from_pretrained(cls, path : str, dtype=torch.bfloat16):\n \"\"\"Loads the model from the specified path.\n\n Args:\n path (str): Path from which to load the model\n dtype (torch.dtype, optional): Default dtype for the models. 
Defaults to torch.bfloat16.\n\n Returns:\n ModelArithmetic: model arithmetic model\n \"\"\"\n with open(os.path.join(path, cls.SAVE_FILE), \"r\") as f:\n all_settings = json.load(f)\n all_settings[\"formula\"] = Operator.load_from_settings(all_settings[\"formula\"])\n return cls(**all_settings, dtype=dtype)\n\n \n def forward_model(self, runnable_operator, continuation_tokens, model_new_tokens=None, use_cache=False, do_speculation=False):\n \"\"\"Runs a specifc runnable operator on the continuation tokens.\n\n Args:\n runnable_operator (RunnableOperator): The runnable operator to run.\n continuation_tokens (list[list[int]]): List of tokens that need to be continued. The prompt is not included in these tokens\n model_new_tokens (list[int], optional): New tokens for the model. Defaults to None.\n use_cache (bool, optional): Whether or not to allow the model to use cache (eg key-value storage for an LLM). Defaults to False.\n do_speculation (bool, optional): Whether or not to do speculation sampling. Defaults to False.\n\n Returns:\n torch.tensor: logprobs of the model, one logprob distribution for each new token in each sample\n \"\"\"\n start_time = time.time()\n \n tokenized_input_creator = self.model_input_tokens[runnable_operator.id()]\n tokenized_inputs = tokenized_input_creator.add_continuation_tokens(continuation_tokens)\n tokenized_only_input = tokenized_input_creator.get_only_input_tokens()\n \n was_none = model_new_tokens is None\n \n if was_none:\n model_new_tokens = torch.tensor([len(continuation_tokens[i]) + 1 for i in range(len(continuation_tokens))])\n \n if len(self.model_prediction_history) < len(continuation_tokens):\n new_prediction_history = [dict() for _ in range(len(continuation_tokens))]\n else:\n new_prediction_history = [self.model_prediction_history[i].get(self.max_index_prediction_history(i), dict()) for i in range(len(continuation_tokens))]\n \n logprobs = runnable_operator.run(\n loaded_models=self.loaded_models,\n tokenized_inputs=tokenized_inputs,\n model_new_tokens=model_new_tokens,\n new_prediction_history=new_prediction_history,\n other_tokenizer=self.tokenizer,\n tokenized_only_input=tokenized_only_input, \n use_cache=use_cache,\n do_speculation=do_speculation\n )\n \n logprobs = [logprob.to(self.device) for logprob in logprobs]\n \n if was_none:\n logprobs = torch.stack(logprobs, dim=0)\n\n self.monitor.add_result(element=time.time() - start_time, runnable_operator=runnable_operator)\n return logprobs\n \n def group_complete(self, model_history):\n \"\"\"Checks which groups of runnable operators have been completely calculated and which haven't.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict[bool]: Dict mapping the group to whether it has been completely calculated or not\n \"\"\"\n # everything that is a group needs to be either all calculated or all not calculated\n group_calculated = dict()\n groups = set([runnable_operator.group for runnable_operator in self.runnable_operators if runnable_operator.group is not None])\n completed_groups = {group: True for group in groups}\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is not None:\n is_calculated = model_history.get(runnable_operator.id()) is not None\n if runnable_operator.group not in group_calculated:\n group_calculated[runnable_operator.group] = is_calculated\n elif group_calculated[runnable_operator.group] != is_calculated:\n completed_groups[runnable_operator.group] = False\n return 
completed_groups\n \n def group_model_history(self, model_history):\n \"\"\"Sets the model history on which to evaluate the formula based on the groups. Removes predictions if the group hasn't been completely calculated yet.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict: Adjusted dict mapping\n \"\"\"\n completed_groups = self.group_complete(model_history)\n grouped_model_history = dict()\n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is None or completed_groups[runnable_operator.group]:\n grouped_model_history[runnable_operator.id()] = model_history[runnable_operator.id()]\n else:\n grouped_model_history[runnable_operator.id()] = None\n \n return grouped_model_history\n \n def create_sample_logprobs(self, logprobs, temperature, top_k, top_p):\n \"\"\"Creates the logprobs for each token in each sample.\n\n Args:\n logprobs (torch.tensor): Logprobs of the model\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n torch.tensor: Logprobs for each token in each sample\n \"\"\"\n if temperature == 0:\n logprobs_argmax = torch.argmax(logprobs, dim=-1)\n logprobs = torch.nn.functional.one_hot(logprobs_argmax, num_classes=logprobs.shape[-1]).float()\n return logprobs\n logprobs = logprobs / temperature\n logprobs = top_k_top_p_filtering(logprobs.unsqueeze(0), top_k=top_k, top_p=top_p)\n return torch.softmax(logprobs, dim=-1).squeeze()\n \n \n\n def process_logprobs(self, model_history):\n \"\"\"Processes model history to get the probability distribution for the token.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n _type_: _description_\n \"\"\"\n init_time = time.time()\n logprobs_normalized = self.formula.evaluate(model_history)\n self.monitor.add_result(element=time.time() - init_time, indicator=\"formula_evaluation\")\n if not torch.is_tensor(logprobs_normalized):\n return None\n # logprobs_normalized = logprobs_normalized / temperature\n # logprobs_normalized = top_k_top_p_filtering(logprobs_normalized.unsqueeze(0), top_k=top_k, top_p=top_p)\n return logprobs_normalized\n \n def run_retroactive_operators(self, index, tokenized_sentence, temperature, top_k, top_p):\n \"\"\"Runs the retroactive operators on the tokenized sentence. 
\n\n Args:\n index (int): Index of the sentence in the current batch\n tokenized_sentence (list[int]): Tokenized sentence\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n list[int]: Adjusted tokenized sentence based on the retroactive operators and whether they accepted it.\n \"\"\"\n for operator in self.retroactive_operators:\n accepted = operator.accept(tokenized_sentence, self.tokenizer)\n if accepted < 0:\n not_accepted_token = tokenized_sentence[accepted]\n self.clear_model_prediction_history(index, tokenized_sentence, from_=len(tokenized_sentence) + accepted)\n tokenized_sentence = tokenized_sentence[:len(tokenized_sentence) + accepted]\n \n self.logprobs_history[index][len(tokenized_sentence)][not_accepted_token] = -torch.inf\n \n if torch.all(self.logprobs_history[index][len(tokenized_sentence)] == -torch.inf):\n self.logprobs_history[index][len(tokenized_sentence)] = torch.zeros_like(self.logprobs_history[index][len(tokenized_sentence)])\n \n probs_to_sample = self.create_sample_logprobs(\n self.logprobs_history[index][len(tokenized_sentence)],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n )\n new_token = torch.multinomial(probs_to_sample, 1).item()\n \n tokenized_sentence.append(new_token)\n return self.run_retroactive_operators(index, tokenized_sentence, temperature, top_k, top_p)\n \n return tokenized_sentence\n \n def speculation_sample(self, token, previous_models_probs, new_models_probs):\n \"\"\"Sample a token based on the previous and new model probabilities in the speculative sampling way. Also returns whether the token was accepted or not.\n\n Args:\n token (int): Token that is currently selected\n previous_models_probs (torch.tensor): Model probabilities of the previous models\n new_models_probs (torch.tensor): Model probabilities of the new models\n\n Returns:\n (int, bool): New token and whether or not the input token was accepted\n \"\"\"\n acceptance_prob = torch.minimum(torch.tensor(1.0), new_models_probs[token] / (previous_models_probs[token] + torch.tensor(self.epsilon)))\n # TODO: the next line is taking an enormous amount of time because of asynchronous computing on gpu's and requiring it to be returned immediately\n # Therefore do batch processing\n acceptance_prob = float(acceptance_prob)\n self.monitor.add_result(element=float(acceptance_prob), indicator=\"acceptance_prob\")\n # self.monitor.add_result(element=self.entropy(previous_models_probs).item(), indicator=\"entropy_previous\")\n # self.monitor.add_result(element=previous_models_probs[token].item(), indicator=\"probability_previous\")\n\n if torch.rand(1) < acceptance_prob:\n return token, True\n else:\n new_proba_distrib = torch.relu(new_models_probs - previous_models_probs)\n new_proba_distrib /= torch.sum(new_proba_distrib)\n new_token = torch.multinomial(new_proba_distrib, 1).item()\n return new_token, False\n \n \n def add_new_result(self, generated_tokens, num_new_tokens, runnable_operator, new_model_logprobs, top_p, top_k, temperature):\n \"\"\"Adds a new run of a runnable operator to the model prediction history. 
Also does speculation sampling if needed.\n\n Args:\n generated_tokens (list[list[int]]): Currently generated tokens by the model\n num_new_tokens (list[int]): Number of new tokens for each sample in the batch\n runnable_operator (RunnableOperator): Runnable operator that was run\n new_model_logprobs (List[torch.tensor]): Output of the run function of the runnable operator\n top_p (flaot): top_p to use\n top_k (int): top_k to use\n temperature (float): temperature to use\n\n Returns:\n list[bool]: For each sample in the batch, whether all tokens in that sample were kept or not\n \"\"\"\n all_kept = []\n for i in range(len(generated_tokens)):\n n_generated_tokens = len(generated_tokens[i])\n kept = True\n for n_token in range(n_generated_tokens - num_new_tokens[i] + 1, n_generated_tokens + 1):\n # initialize the model prediction history\n self.model_prediction_history[i][n_token] = self.model_prediction_history[i].get(n_token, \n {runnable_operator.id(): None for runnable_operator in self.runnable_operators})\n # check if we need to do speculation sampling, only needed when a previous token was sampled\n do_speculation_sample = n_token < n_generated_tokens\n \n # speculation sampling not needed if the model was run before \n if self.model_prediction_history[i][n_token][runnable_operator.id()] is not None:\n do_speculation_sample = False\n \n # speculation sampling not needed if all models have not been run yet: this is the first model on this token\n if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]):\n do_speculation_sample = False\n # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered)\n if self.max_index_prediction_history(i) > n_token:\n continue\n \n # add the new model logprobs\n self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1]\n \n group_model_history = self.group_model_history(self.model_prediction_history[i][n_token])\n # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated\n # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to)\n if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)):\n continue\n \n # process the logprobs\n new_model_probs = self.process_logprobs(group_model_history)\n \n if self.intermediate_argmax and not self.formula.is_finished(group_model_history):\n argmax_el = torch.argmax(new_model_probs)\n new_model_probs = torch.zeros_like(new_model_probs)\n new_model_probs[argmax_el] = 1.0\n \n if do_speculation_sample:\n if self.calculate_statistics:\n self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), \n indicator=\"expected_acceptance_prob\", runnable_operator=runnable_operator)\n\n new_token, kept = self.speculation_sample(\n token = generated_tokens[i][n_token],\n previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p),\n new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n )\n if n_token in self.model_prediction_history[i]:\n 
self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n # if not kept, we change the generated tokens and remove the model prediction history after that token\n generated_tokens[i][n_token] = new_token\n generated_tokens[i] = generated_tokens[i][:n_token + 1]\n self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token)\n self.trigger_end[i] = False\n \n elif n_token in self.model_prediction_history[i]:\n self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n break\n \n all_kept.append(kept)\n return all_kept\n \n\n def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1):\n \"\"\"Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then \n deletes history of tokens that were prediction, but then got removed because of speculation\n\n Args:\n index (int): index of the sample in the batch\n generated_tokens_index (list[int]): Generated tokens at the index\n from_ (int, optional): From which token to delete all the history. Defaults to -1.\n \"\"\"\n all_indices = list(self.model_prediction_history[index].keys())\n for token in all_indices:\n all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()])\n finished = self.formula.is_finished(self.model_prediction_history[index][token])\n if all_none or finished or (from_ != -1 and token > from_):\n if finished and len(generated_tokens_index) > token and self.calculate_statistics:\n self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token))\n \n if finished:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index])\n \n del self.model_prediction_history[index][token]\n \n if from_ > -1:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index])\n \n def max_index_prediction_history(self, index):\n \"\"\"Gets the max index of the model prediction history for a specific runnable operator \n\n Args:\n index (int): index of runnable operator in the list of runnable operators\n\n Returns:\n int: max index of its prediction\n \"\"\"\n keys = list(self.model_prediction_history[index].keys())\n if len(keys) == 0:\n return 0\n return max(self.model_prediction_history[index].keys())\n\n def normal_sample(self, probs):\n \"\"\"Samples from a probability distribution\n\n Args:\n probs (torch.tensor): Probability distribution\n\n Returns:\n int: Sampled token\n \"\"\"\n out = torch.multinomial(probs, 1)\n return out\n \n def KL_divergence(self, p, q):\n \"\"\"Compuates KL divergence between two probability distributions\n\n Args:\n p (torch.tensor): probability distribution\n q (torch.tensor): probability distribution\n\n Returns:\n float: KL divergence\n \"\"\"\n return torch.sum(p * torch.log((p + self.epsilon) / (q + self.epsilon)))\n \n def entropy(self, p):\n \"\"\"Computes entropy of a probability distribution\n\n Args:\n p (torch.tensor): probability distribution\n\n Returns:\n float: entropy\n \"\"\"\n return -torch.sum(p * torch.log(p + self.epsilon))\n \n def expected_acceptance_prob(self, p, q):\n \"\"\"\n Calculates the expected acceptance probability of speculative sampling.\n \n Args:\n p (torch.tensor): probability 
distribution\n q (torch.tensor): probability distribution\n \"\"\"\n return 1 - 1 / 2 * torch.sum(torch.abs(q - p)).item()\n \n def add_monitor_token_probs(self, token, history, history_logprobs):\n \"\"\"Adds some token probabilities to the monitor\n\n Args:\n token (int): Samples token\n history (dict): Model prediction history at the specific index where the token was drawn from\n history_logprobs (torch.tensor): LogProbability distribution from which the token was drawn.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.is_finished(history) and runnable_operator.outputs_logprobs:\n evaluated = runnable_operator.evaluate(history)\n self.monitor.add_result(element=torch.softmax(evaluated, dim=-1)[token].item(), runnable_operator=runnable_operator, indicator=\"token_prob\")\n # add logprob as well\n self.monitor.add_result(element=max(evaluated[token].item(), np.log(self.epsilon)), runnable_operator=runnable_operator, indicator=\"token_logprob\")\n # add KL divergence\n if history_logprobs is not None:\n self.monitor.add_result(element=self.KL_divergence(torch.softmax(history_logprobs, dim=-1), torch.softmax(evaluated, dim=-1)).item(), \n runnable_operator=runnable_operator, indicator=\"KL_divergence\")\n \n self.monitor.add_result(element=self.entropy(torch.softmax(history_logprobs, dim=-1)).item(), indicator=\"entropy\")\n\n def next_token_speculative(self, continuation_tokens, \n top_p=1.0, top_k=0, temperature=1.0, speculation=True, use_cache=True):\n \"\"\"Continues one step in the generation process by running the runnable operators that need to be run and then sampling from the probability distribution.\n\n Args:\n continuation_tokens (list[list[int]]): Current continuation tokens\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n temperature (float, optional): temperature to use. Defaults to 1.0.\n speculation (bool, optional): Whether to use speculation. Defaults to True.\n use_cache (bool, optional): Whether to use cache. 
Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n models_ran = []\n for i, runnable_operator in enumerate(self.runnable_operators):\n new_tokens = [len(continuation_tokens[j]) - self.model_last_token_prediction[i][j] + 1 for j in range(len(continuation_tokens))]\n if runnable_operator.run_condition(new_tokens, self.trigger_end) or not speculation:\n logprobs = self.forward_model(runnable_operator, continuation_tokens, model_new_tokens=new_tokens, use_cache=use_cache, do_speculation=speculation)\n all_kept = self.add_new_result(continuation_tokens, new_tokens, runnable_operator, logprobs, top_p, top_k, temperature)\n models_ran.append(i)\n \n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) + int(all_kept[j])\n for j in range(len(continuation_tokens))]\n \n if not all(all_kept):\n break\n \n to_sample_indices = [i for i in range(len(continuation_tokens)) if all_kept[i] and not self.trigger_end[i]]\n\n if len(to_sample_indices) > 0:\n # do batch sampling\n all_required_histories = torch.stack([\n self.create_sample_logprobs(\n self.logprobs_history[i][len(continuation_tokens[i])], \n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n ) for i in to_sample_indices\n ])\n new_tokens = self.normal_sample(all_required_histories)\n for i in range(len(to_sample_indices)):\n continuation_tokens[to_sample_indices[i]].append(new_tokens[i].item())\n\n for i in models_ran:\n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) for j in range(len(continuation_tokens))]\n return continuation_tokens\n\n def __call__(self, input_ids, **kwargs):\n \"\"\"Runs the forward pass of the model. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n return self.forward(input_ids, **kwargs)\n \n def forward(self, input_ids, normalize=True, **kwargs):\n \"\"\"Runs the foward pass. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n normalize (bool, optional): Whether or not to normalize the output. 
Defaults to True.\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n logprobs_per_model = {runnable_operator.id(): None for runnable_operator in self.runnable_operators}\n if not isinstance(input_ids, list):\n input_shape = input_ids.shape\n continuation_tokens = self.lm_eval_compatibility.forward_preprocessing(input_ids, self.model_input_tokens)\n else:\n input_shape = None\n continuation_tokens = input_ids\n\n for runnable_operator in self.runnable_operators:\n logprobs = self.forward_model(runnable_operator, continuation_tokens)\n if input_shape is not None:\n logprobs = self.lm_eval_compatibility.forward_post_processing(logprobs, input_shape)\n logprobs_per_model[runnable_operator.id()] = logprobs\n\n output = self.formula.evaluate(logprobs_per_model, normalize=normalize)\n return [output]\n\n def get_decoded_tokens(self, next_tokens_batch):\n \"\"\"Gets decoded tokens from the next tokens\n\n Args:\n next_tokens_batch (list[list[int]]): New tokens for each sample in the batch\n\n Returns:\n list[str]: Decoded tokens\n \"\"\"\n # adding eos token for compatibility with sentencepiece tokenizer\n encoded_sentences = [[self.tokenizer.eos_token_id] + next_tokens for next_tokens in next_tokens_batch]\n decoded_sentences = [self.tokenizer.decode(encoded_sentence, add_special_tokens=False) for encoded_sentence in encoded_sentences]\n decoded_next_tokens = [decoded_sentence[len(self.tokenizer.eos_token):] for decoded_sentence in decoded_sentences]\n return decoded_next_tokens\n \n def clear_memory(self):\n \"\"\"Deletes all loaded models and clears the cache\n \"\"\"\n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache()\n self.loaded_models = dict()\n torch.cuda.empty_cache()\n\n def generate_text(self, sentences, max_length=1024, stop_texts=None, batch_size=None,\n temperature=1.0, top_p=1.0, top_k=0, num_return_sequences=1, do_speculation=False, use_cache=True, **kwargs):\n \"\"\"Generates text based on the input params\n\n Args:\n sentences (list[str]): List of input sentences\n max_length (int, optional): Max generation length. Defaults to 128.\n stop_texts (list[str], optional): Strings at which to stop generation. Defaults to None.\n batch_size (int, optional): Batch size. Defaults to None (all at once).\n temperature (float, optional): temperature to use. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n num_return_sequences (int, optional): Number of return sequences per sentence. Defaults to 1.\n do_speculation (bool, optional): Whether or not to do speculation. Defaults to True.\n use_cache (bool, optional): Whether or not to use cache. 
Defaults to True.\n\n Returns:\n list[str]: List of generated texts\n \"\"\"\n assert not do_speculation or any([runnable_operator.speculative_factor == 1 for runnable_operator in self.runnable_operators])\n if isinstance(sentences, str):\n sentences = [sentences]\n if batch_size is None:\n batch_size = len(sentences)\n \n # duplicate each sentence num_return_sequences times, but keep the same sentences next to each other\n sentences = [sentence for sentence in sentences for _ in range(num_return_sequences)]\n\n self.model_prediction_history = [dict() for _ in range(batch_size)]\n self.logprobs_history = [dict() for _ in range(batch_size)]\n self.model_last_token_prediction = [[0 for _ in range(batch_size)] for _ in range(len(self.runnable_operators))]\n self.trigger_end = [False for _ in range(batch_size)]\n self.init_monitor()\n \n if stop_texts is None:\n stop_texts = []\n stop_texts.append(self.tokenizer.eos_token)\n\n start_sentences = sentences[:]\n\n log(logger.debug, f\"Generating {len(sentences)} sentences\")\n\n generated_texts = [\"\" for _ in range(len(sentences))]\n generated_tokens = [[] for _ in range(len(sentences))]\n current_indices = [i for i in range(0, min(len(sentences), batch_size))]\n next_index = len(current_indices)\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n \n total_done = 0\n while len(current_indices) > 0:\n start_time = time.time()\n generated_tokens_batch = [generated_tokens[index] for index in current_indices]\n next_tokens = self.next_token_speculative(generated_tokens_batch, top_p, top_k, \n temperature, speculation=do_speculation, use_cache=use_cache)\n for i in range(len(next_tokens)):\n next_tokens[i] = self.run_retroactive_operators(i, next_tokens[i], temperature, top_k, top_p)\n self.clear_model_prediction_history(i, next_tokens[i])\n decoded_tokens = self.get_decoded_tokens(next_tokens)\n\n for i, index in enumerate(current_indices):\n generated_tokens[index] = next_tokens[i]\n generated_texts[index] = decoded_tokens[i]\n\n indices_to_remove = []\n for i in range(len(current_indices)):\n sentences[current_indices[i]] = start_sentences[current_indices[i]] + generated_texts[current_indices[i]]\n if any([stop_text in generated_texts[current_indices[i]] for stop_text in stop_texts]) or len(generated_tokens[current_indices[i]]) >= max_length:\n if len(self.model_prediction_history[i]) == 0:\n indices_to_remove.append(i)\n else:\n self.trigger_end[i] = True\n \n for i in indices_to_remove[::-1]:\n self.monitor.add_result(element=len(generated_tokens[current_indices[i]]), indicator=\"length\")\n del current_indices[i]\n self.model_prediction_history = self.model_prediction_history[:i] + self.model_prediction_history[i + 1:]\n self.logprobs_history = self.logprobs_history[:i] + self.logprobs_history[i + 1:]\n for j in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[j] = self.model_last_token_prediction[j][:i] + self.model_last_token_prediction[j][i + 1:]\n self.trigger_end = self.trigger_end[:i] + self.trigger_end[i + 1:]\n \n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache(index=i)\n\n if next_index < len(sentences):\n current_indices.append(next_index)\n self.model_prediction_history.append(dict())\n self.logprobs_history.append(dict())\n self.trigger_end.append(False)\n \n for j in range(len(self.model_last_token_prediction)):\n 
self.model_last_token_prediction[j].append(0)\n \n next_index += 1\n total_done += 1\n if total_done % 30 == 0:\n log(logger.debug, f\"Progress: {total_done / len(sentences):.3f}\")\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n\n self.monitor.add_result(element=time.time() - start_time)\n \n return generated_texts\n\n def generate(self, input_ids, attention_mask=None, do_sample=False, max_new_tokens=1024, \n stopping_criteria=None, temperature=1.0, top_p=1.0, top_k=0, use_cache=True, eos_token_id=None, pad_token_id=None, **kwargs):\n \"\"\"Generates text based on the input params. Needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n attention_mask (torch.tensor, optional): attention mask. Defaults to None.\n do_sample (bool, optional): Whether or not to sample. Defaults to False.\n max_new_tokens (int, optional): Max new number of tokens. Defaults to 128.\n stopping_criteria (_type_, optional): Stopping criteria to use. Defaults to None.\n temperature (float, optional): Temperature to. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n use_cache (bool, optional): Whether or not to use cache. Defaults to True.\n eos_token_id (int, optional): eos token id. Defaults to None.\n pad_token_id (int, optional): pad token id. Defaults to None.\n\n Returns:\n list[str]: Generated texts\n \"\"\"\n if not do_sample:\n top_k = 1\n \n batch_size = input_ids.shape[0]\n input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]\n stopping_sequences = [self.tokenizer.eos_token]\n if stopping_criteria is not None:\n stopping_sequences += [criteria.sequence for criteria in stopping_criteria]\n if eos_token_id is not None:\n stopping_sequences += [self.tokenizer.decode([eos_token_id])]\n \n texts = self.generate_text(input_texts, max_length=max_new_tokens, stop_texts=stopping_sequences,\n batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, use_cache=use_cache)\n encoded_texts = self.tokenizer.batch_encode_plus(texts, add_special_tokens=False, return_tensors=\"pt\").input_ids.to(self.device)\n # concatenate the input_ids with the encoded_texts\n all_encoded = torch.cat([input_ids, encoded_texts], dim=-1)\n return all_encoded" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" } ]
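The speculation_sample and expected_acceptance_prob methods in the ModelArithmetic snippet above implement standard speculative sampling: a draft token is accepted with probability min(1, q(t)/p(t)), otherwise a replacement is drawn from the normalized positive part of q - p, and the expected acceptance rate equals one minus the total variation distance between the two distributions. Below is a minimal self-contained sketch of that rule; the toy distributions are illustrative only and are not taken from the repository.

import torch

def speculation_sample(token, previous_probs, new_probs, epsilon=1e-12):
    # Accept the proposed token with probability min(1, q[token] / p[token]),
    # as in ModelArithmetic.speculation_sample above.
    acceptance_prob = torch.minimum(
        torch.tensor(1.0), new_probs[token] / (previous_probs[token] + epsilon)
    )
    if torch.rand(1).item() < float(acceptance_prob):
        return token, True
    # Rejected: resample from the normalized positive part of (q - p).
    residual = torch.relu(new_probs - previous_probs)
    residual = residual / residual.sum()
    return torch.multinomial(residual, 1).item(), False

def expected_acceptance_prob(p, q):
    # 1 - total variation distance, the statistic reported to the monitor above.
    return 1 - 0.5 * torch.sum(torch.abs(q - p)).item()

if __name__ == "__main__":
    p = torch.tensor([0.7, 0.2, 0.1])  # distribution the draft token was sampled from
    q = torch.tensor([0.5, 0.4, 0.1])  # distribution of the full formula
    draft = torch.multinomial(p, 1).item()
    token, kept = speculation_sample(draft, p, q)
    print(draft, token, kept, expected_acceptance_prob(p, q))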
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
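The import block above pulls in CustomDataset, which (per the snippet in the context list) pre-tokenizes a DataFrame's "text" column and optionally attaches labels. A small usage sketch follows; the import path, model name, and rows are illustrative assumptions rather than anything prescribed by the repository.

import pandas as pd
from transformers import AutoTokenizer
from model_arithmetic.dataset import CustomDataset  # assumes the package is importable under this name

# Hypothetical toy data; any DataFrame with a "text" (and optional "label") column works.
df = pd.DataFrame({
    "text": ["a short example sentence", "another example"],
    "label": [1, 0],
})
tokenizer = AutoTokenizer.from_pretrained("gpt2")

ds = CustomDataset(tokenizer, df, max_tokens=32)
item = ds[0]
# input_ids is a 1-D token tensor; labels is a length-1 long tensor because "label" is present.
print(len(ds), item["input_ids"].shape, item["labels"])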
16086
real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency) model = load_model(model_name_fluency, dtype=dtype) self.output["perplexity"] = self.get_perplexity(dataset, model, tokenizer) log(logger.info, f"Perplexity is {self.output['perplexity']}") del model torch.cuda.empty_cache() return self.output["perplexity"] def faithfulness_multiple(self, dataset, model_name, **kwargs): """Calculates the faithfulness for all stored classifiers Args: dataset (pd.DataFrame): The dataset to be used for evaluation. model_name (str, list of strings): Classifier names to use """ if not isinstance(model_name, (list, tuple)): model_name = [model_name] results = dict() for model in model_name: name = model if not isinstance(name, str): name = model.__str__() results[name] = self.faithfulness(dataset, model_name=model, **kwargs) self.output["faithfulness"] = results return results def faithfulness(self, dataset, finetune_model=True, classification_with_input=True, model_name="distilbert-base-uncased", model=None, test_size=0.2, max_length=128, epochs=3, batch_size_faithfulness=16, learning_rate=2e-5, warmup_steps=500, weight_decay=0.01, save_model_folder=None, dtype_faithfulness=torch.float32, store_faithfulness=False, **kwargs): """ Calculates the faithfulness of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). If finetuning, also has column "output" (for output ground truth). finetune_model (bool, optional): Whether to finetune the model or not. classification_with_input (bool, optional): Whether to use the input of the sentence for classification or not. model_name (str, optional): The name of the model to be used for classification (either path or name). Either this or model should be provided. model (PreTrainedModel, optional): The model to be used for classification. Either this or model_name should be provided. test_size (float, optional): The size of the test set to be used for evaluation. max_length (int, optional): The maximum length of the sentences to be used for evaluation. epochs (int, optional): The number of epochs to be used for training the model. batch_size_faithfulness (int, optional): The batch size to be used for evaluation. learning_rate (float, optional): The learning rate to be used for training the model. warmup_steps (int, optional): The number of warmup steps to be used for training the model. weight_decay (float, optional): The weight decay to be used for training the model. save_model_folder (str, optional): The folder to save the trained model. dtype_faithfulness (torch.dtype, optional): The data type to be used for the model. 
store_faithfulness (bool, optional): Whether to store the resulting score or not. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating faithfulness") if ("label" not in dataset.columns or all(dataset["label"] == 1) or all(dataset["label"] == 0)) and finetune_model: log(logger.info, "Dataset does not have good labels, cannot calculate faithfulness") return None if "faithfulness" in self.output: log(logger.info, f"Reloading faithfulness. Faithfulness is {self.output['faithfulness']}") return self.output["faithfulness"] set_seed(42) df = dataset.copy() if classification_with_input: df["text"] = df["input"] + df["generated"] else: df["text"] = df["generated"] if isinstance(model_name, str): tokenizer = load_tokenizer(model_name) args = TrainingArguments( output_dir="../finetune/eval/random", evaluation_strategy="epoch", save_strategy="epoch", num_train_epochs=epochs, per_device_train_batch_size=batch_size_faithfulness, per_device_eval_batch_size=batch_size_faithfulness, warmup_steps=warmup_steps, weight_decay=weight_decay, logging_dir="logs", logging_steps=100, learning_rate=learning_rate, ) if model is None: log(logger.info, "Loading model") model = load_model(model_name, classification=True, dtype=dtype_faithfulness) # we need to train the model on the dataset if finetune_model: log(logger.info, "Finetuning model") df = dataset.copy() df = df.dropna() df["text"] = df["input"] + df["output"] train, val = train_test_split(df, test_size=test_size)
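Evaluation.get_perplexity above feeds input + generated text through a separately loaded fluency model, sums the negative log-likelihood of the generated tokens only, and reports the per-sample mean and median perplexity plus exp(total NLL / total tokens). The following is a stripped-down, single-pair sketch of that calculation; "gpt2" and the prompt/continuation strings are placeholders (the evaluator itself defaults to gpt2-xl).

import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def continuation_perplexity(model, tokenizer, prompt, continuation):
    # Score prompt + continuation with the fluency model, counting only the
    # continuation tokens, mirroring Evaluation.get_perplexity above.
    enc = tokenizer(prompt + continuation, return_tensors="pt")
    input_ids = enc["input_ids"].to(model.device)
    input_length = tokenizer(prompt, return_tensors="pt")["input_ids"].size(1)

    with torch.no_grad():
        logits = model(input_ids, attention_mask=enc["attention_mask"].to(model.device)).logits
    logprobs = logits[0].log_softmax(dim=-1)

    # Same indexing as above: position input_length predicts the token at
    # input_length + 1, so the very first continuation token is not scored.
    loss = torch.nn.functional.nll_loss(
        logprobs[input_length:-1, :], input_ids[0, input_length + 1:], reduction="sum"
    )
    n_tokens = input_ids.shape[-1] - input_length - 1
    return float(np.exp(loss.item() / n_tokens)), n_tokens

if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("gpt2")
    lm = AutoModelForCausalLM.from_pretrained("gpt2")
    ppl, n = continuation_perplexity(lm, tok, "The quick brown fox", " jumps over the lazy dog")
    print(f"perplexity over {n} scored continuation tokens: {ppl:.2f}")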
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. 
""" log(logger.debug, "Preparing dataset") if "input" not in self.dataset.columns: log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output") self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words])) self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:])) self.has_input_task = False if "label" not in self.dataset.columns: log(logger.debug, "No label column found, assuming all labels are 1") self.dataset["label"] = 1 def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs): """ Evaluates the model using the lm_eval package. Args: model (PreTrainedModel): The model to be evaluated. task_name (string): The name of the task for evaluation. batch_size (int): The batch size to be used for evaluation. num_fewshot (int): The number of fewshot examples to be used for evaluation. model_args (dict): The arguments to be passed to the model. no_cache (bool, optional): Whether to use cached results or not. limit (int, optional): The maximum number of examples to be used for evaluation. write_out (bool, optional): Whether to write out the results or not. output_folder (string, optional): The folder to write out the results. **kwargs: Additional keyword arguments. """ try: except ImportError: raise ImportError("Please install lm_eval to run this function") results = evaluator.simple_evaluate( model=model, model_args=model_args, tasks=[task_name], num_fewshot=num_fewshot, batch_size=batch_size, device="cuda" if torch.cuda.is_available() else "cpu", no_cache=no_cache, limit=limit, write_out=write_out, output_base_path=output_folder ) if "lm_eval" in self.output: self.output["lm_eval"][task_name] = results else: self.output["lm_eval"] = {task_name: results} def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False, batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs): """ Evaluates the model on the dataset and calculates the perplexity and faithfulness Args: max_tokens (int, optional): The maximum number of tokens to be used for evaluation. store_file (string, optional): The file to store the evaluation results. reload (bool, optional): Whether to reload the dataset or not if it was stored before. dataset_file (string, optional): The file containing the dataset. If path exists, dataset is loaded from path. If path does not exist, dataset is saved to path. reload_data (bool, optional): Whether to reload the data or not if it was stored before. preserve_memory (bool, optional): Whether to preserve memory or not. batch_size (int, optional): The batch size to be used for evaluation. do_perspective (bool, optional): Whether to calculate the perspective score or not. speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. 
""" if store_file is not None: os.makedirs(os.path.dirname(store_file), exist_ok=True) if dataset_file is not None: os.makedirs(os.path.dirname(dataset_file), exist_ok=True) if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file): log(logger.debug, f"Reloading dataset from {dataset_file}") self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n") self.dataset.fillna("", inplace=True) else: log(logger.debug,"Generating samples") self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") if reload and store_file is not None and os.path.isfile(store_file): self.output = json.load(open(store_file))["output"] log(logger.info, f"Reloading output. Output is {self.output}") if not reload: self.output = dict() self.dataset.dropna(inplace=True) if preserve_memory and hasattr(self.generator, "clear_memory"): # can have weird side effects! self.generator.clear_memory() if not only_faithfulness: self.perplexity(self.dataset, **kwargs) else: del self.output['faithfulness'] self.faithfulness_multiple(self.dataset, **kwargs) if do_perspective and not only_faithfulness: self.perspective(**kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs} self.extra_kwargs["max_tokens"] = max_tokens if store_file is not None: log(logger.info, f"Saving output to {store_file}") self.save(store_file) return self.output def perspective(self, classification_with_input=True, **kwargs): """ Calculates the perspective score of the generated sentences. Args: classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not. **kwargs: Additional keyword arguments. """ retries = 0 if "perspective" in self.output: log(logger.info, f"Reloading perspective. 
Perspective is {self.output['perspective']}") return self.output["perspective"] log(logger.info, "Calculating perspective") service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=os.environ["PERSPECTIVE_API_KEY"], discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False) scores = [] self.dataset["perspective"] = [None] * len(self.dataset) for index, sample in self.dataset.iterrows(): sentence = sample['generated'] if classification_with_input: sentence = sample['input'] + sentence analyze_request = { 'comment': { 'text': sentence }, 'languages': ["en"], 'requestedAttributes': {'TOXICITY': {}} } while retries < 10 ** 5: try: response = service.comments().analyze(body=analyze_request).execute() break except Exception as e: if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e): continue log(logger.warning, f"Exception {e} occurred, retrying...") retries += 1 time.sleep(10) if retries == 10 ** 5: log(logger.error, "Could not get all perspective scores") break value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency) model = load_model(model_name_fluency, dtype=dtype) self.output["perplexity"] = self.get_perplexity(dataset, model, tokenizer) log(logger.info, f"Perplexity is {self.output['perplexity']}") del model torch.cuda.empty_cache() return self.output["perplexity"] def faithfulness_multiple(self, dataset, model_name, **kwargs): """Calculates the faithfulness for all stored classifiers Args: dataset (pd.DataFrame): The dataset to be used for evaluation. model_name (str, list of strings): Classifier names to use """ if not isinstance(model_name, (list, tuple)): model_name = [model_name] results = dict() for model in model_name: name = model if not isinstance(name, str): name = model.__str__() results[name] = self.faithfulness(dataset, model_name=model, **kwargs) self.output["faithfulness"] = results return results def faithfulness(self, dataset, finetune_model=True, classification_with_input=True, model_name="distilbert-base-uncased", model=None, test_size=0.2, max_length=128, epochs=3, batch_size_faithfulness=16, learning_rate=2e-5, warmup_steps=500, weight_decay=0.01, save_model_folder=None, dtype_faithfulness=torch.float32, store_faithfulness=False, **kwargs): """ Calculates the faithfulness of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). If finetuning, also has column "output" (for output ground truth). 
finetune_model (bool, optional): Whether to finetune the model or not. classification_with_input (bool, optional): Whether to use the input of the sentence for classification or not. model_name (str, optional): The name of the model to be used for classification (either path or name). Either this or model should be provided. model (PreTrainedModel, optional): The model to be used for classification. Either this or model_name should be provided. test_size (float, optional): The size of the test set to be used for evaluation. max_length (int, optional): The maximum length of the sentences to be used for evaluation. epochs (int, optional): The number of epochs to be used for training the model. batch_size_faithfulness (int, optional): The batch size to be used for evaluation. learning_rate (float, optional): The learning rate to be used for training the model. warmup_steps (int, optional): The number of warmup steps to be used for training the model. weight_decay (float, optional): The weight decay to be used for training the model. save_model_folder (str, optional): The folder to save the trained model. dtype_faithfulness (torch.dtype, optional): The data type to be used for the model. store_faithfulness (bool, optional): Whether to store the resulting score or not. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating faithfulness") if ("label" not in dataset.columns or all(dataset["label"] == 1) or all(dataset["label"] == 0)) and finetune_model: log(logger.info, "Dataset does not have good labels, cannot calculate faithfulness") return None if "faithfulness" in self.output: log(logger.info, f"Reloading faithfulness. Faithfulness is {self.output['faithfulness']}") return self.output["faithfulness"] set_seed(42) df = dataset.copy() if classification_with_input: df["text"] = df["input"] + df["generated"] else: df["text"] = df["generated"] if isinstance(model_name, str): tokenizer = load_tokenizer(model_name) args = TrainingArguments( output_dir="../finetune/eval/random", evaluation_strategy="epoch", save_strategy="epoch", num_train_epochs=epochs, per_device_train_batch_size=batch_size_faithfulness, per_device_eval_batch_size=batch_size_faithfulness, warmup_steps=warmup_steps, weight_decay=weight_decay, logging_dir="logs", logging_steps=100, learning_rate=learning_rate, ) if model is None: log(logger.info, "Loading model") model = load_model(model_name, classification=True, dtype=dtype_faithfulness) # we need to train the model on the dataset if finetune_model: log(logger.info, "Finetuning model") df = dataset.copy() df = df.dropna() df["text"] = df["input"] + df["output"] train, val = train_test_split(df, test_size=test_size)
train_dataset = CustomDataset(tokenizer, train, max_tokens=max_length)
1
2023-11-21 20:01:08+00:00
24k
HeliosZhao/Animate124
dnerf/utils.py
[ { "identifier": "save_tensor2image", "path": "nerf/utils.py", "snippet": "def save_tensor2image(x: torch.Tensor, path, channel_last=False, quality=75, **kwargs):\n # assume the input x is channel last\n # ipdb.set_trace()\n # if x.ndim == 4:\n # if channel_last:\n # x = x.permute(0, 3, 1, 2) \n # TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)\n if x.ndim == 5:\n ## video\n # ipdb.set_trace()\n path = os.path.splitext(path)[0] + '.mp4' # convert image to mp4\n # B,F,C,H,W or B,F,H,W,C\n if channel_last: # B,F,H,W,C\n x = rearrange(x, \"b f h w c -> b f c h w\")\n save_videos_grid(x, path, **kwargs)\n else:\n if channel_last:\n x = x.permute(0, 3, 1, 2) \n TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)" }, { "identifier": "nonzero_normalize_depth", "path": "nerf/utils.py", "snippet": "def nonzero_normalize_depth(depth, mask=None):\n if mask is not None:\n if (depth[mask]>0).sum() > 0:\n nonzero_depth_min = depth[mask][depth[mask]>0].min()\n else:\n nonzero_depth_min = 0\n else:\n if (depth>0).sum() > 0:\n nonzero_depth_min = depth[depth>0].min()\n else:\n nonzero_depth_min = 0\n if nonzero_depth_min == 0:\n return depth\n else:\n depth = (depth - nonzero_depth_min) \n depth = depth / (depth.max()+1e-6)\n return depth.clamp(0, 1)" }, { "identifier": "Trainer", "path": "nerf/utils.py", "snippet": "class Trainer(object):\n def __init__(self,\n\t\t argv, # command line args\n name, # name of this experiment\n opt, # extra conf\n model, # network\n guidance, # guidance network\n criterion=None, # loss function, if None, assume inline implementation in train_step\n optimizer=None, # optimizer\n ema_decay=None, # if use EMA, set the decay\n lr_scheduler=None, # scheduler\n metrics=[], # metrics for evaluation, if None, use val_loss to measure performance, else use the first metric.\n local_rank=0, # which GPU am I\n world_size=1, # total num of GPUs\n device=None, # device to use, usually setting to None is OK. 
(auto choose device)\n mute=False, # whether to mute all print\n fp16=False, # amp optimize level\n max_keep_ckpt=1, # max num of saved ckpts in disk\n workspace='workspace', # workspace to save logs & ckpts\n best_mode='min', # the smaller/larger result, the better\n use_loss_as_metric=True, # use loss as the first metric\n report_metric_at_train=False, # also report metrics at training\n use_checkpoint=\"latest\", # which ckpt to use at init time\n use_tensorboard=True, # whether to use tensorboard for logging\n scheduler_update_every_step=False, # whether to call scheduler.step() after every train step\n **kwargs\n ):\n\n self.argv = argv\n self.name = name\n self.opt = opt\n self.mute = mute\n self.metrics = metrics\n self.local_rank = local_rank\n self.world_size = world_size\n self.workspace = workspace\n self.ema_decay = ema_decay\n self.fp16 = fp16\n self.best_mode = best_mode\n self.use_loss_as_metric = use_loss_as_metric\n self.report_metric_at_train = report_metric_at_train\n self.max_keep_ckpt = opt.get(\"max_keep_ckpt\", max_keep_ckpt)\n self.use_checkpoint = use_checkpoint\n self.use_tensorboard = use_tensorboard\n self.time_stamp = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n self.scheduler_update_every_step = scheduler_update_every_step\n self.device = device if device is not None else torch.device(f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu')\n self.console = Console()\n\n model.to(self.device)\n if self.world_size > 1:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])\n self.model = model\n\n # guide model\n self.guidance = guidance\n self.embeddings = {}\n\n # text prompt / images\n if self.guidance is not None:\n for key in self.guidance:\n for p in self.guidance[key].parameters():\n p.requires_grad = False\n self.embeddings[key] = {}\n self.prepare_embeddings()\n\n if isinstance(criterion, nn.Module):\n criterion.to(self.device)\n self.criterion = criterion\n\n if optimizer is None:\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=5e-4) # naive adam\n else:\n self.optimizer = optimizer(self.model)\n\n if lr_scheduler is None: ## scheduler is all one, for nerf model other than vanilla nerf\n self.lr_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1) # fake scheduler\n else:\n self.lr_scheduler = lr_scheduler(self.optimizer) \n\n if ema_decay:\n self.ema = ExponentialMovingAverage(\n self.model.parameters(), decay=ema_decay)\n else:\n self.ema = None\n\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.fp16)\n\n # variable init\n self.total_train_t = 0\n self.epoch = 0\n self.global_step = 0\n self.local_step = 0\n self.novel_view_step = 0\n self.stats = {\n \"loss\": [],\n \"valid_loss\": [],\n \"results\": [], # metrics[0], or valid_loss\n \"checkpoints\": [], # record path of saved ckpt, to automatically remove old ckpt\n \"best_result\": None,\n }\n self.loss_meter = AverageMeters()\n # auto fix\n if len(metrics) == 0 or self.use_loss_as_metric:\n self.best_mode = 'min'\n\n logger.info(f'[INFO] cmdline: {self.argv}')\n logger.info(f'args:\\n{self.opt}')\n logger.info(\n f'[INFO] Trainer: {self.name} | {self.time_stamp} | {self.device} | {\"fp16\" if self.fp16 else \"fp32\"} | {self.workspace}')\n logger.info(\n f'[INFO] #parameters: {sum([p.numel() for p in model.parameters() if p.requires_grad])}')\n logger.info(f'[INFO] #Optimizer: \\n{self.optimizer}')\n logger.info(f'[INFO] #Scheduler: 
\\n{self.lr_scheduler}')\n\n if self.workspace is not None:\n if self.use_checkpoint == \"scratch\":\n logger.info(\"[INFO] Training from scratch ...\")\n elif self.use_checkpoint == \"latest\":\n logger.info(\"[INFO] Loading latest checkpoint ...\")\n self.load_checkpoint()\n elif self.use_checkpoint == \"latest_model\":\n logger.info(\"[INFO] Loading latest checkpoint (model only)...\")\n self.load_checkpoint(model_only=True)\n elif self.use_checkpoint == \"best\":\n if os.path.exists(self.opt.best_path):\n logger.info(\"[INFO] Loading best checkpoint ...\")\n self.load_checkpoint(self.opt.best_path)\n else:\n logger.info(\n f\"[INFO] {self.opt.best_path} not found, loading latest ...\")\n self.load_checkpoint()\n else: # path to ckpt\n logger.info(f\"[INFO] Loading {self.use_checkpoint} ...\")\n self.load_checkpoint(self.use_checkpoint)\n\n # calculate the text embs.\n @torch.no_grad()\n def prepare_embeddings(self):\n\n # text embeddings (stable-diffusion)\n if self.opt.text is not None:\n assert not self.opt.text_emb_all\n dir_texts = ['front', 'side', 'back']\n if 'SD' in self.guidance:\n if self.opt.text_emb_all:\n self.embeddings['SD']['default'] = self.guidance['SD'].get_all_text_embeds([self.opt.text])\n neg_embedding = self.guidance['SD'].get_all_text_embeds([self.opt.negative])\n else:\n self.embeddings['SD']['default'] = self.guidance['SD'].get_text_embeds([self.opt.text])\n neg_embedding = self.guidance['SD'].get_text_embeds([self.opt.negative])\n \n self.embeddings['SD']['default'] = torch.cat((neg_embedding, self.embeddings['SD']['default']), dim=0)\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.text}, {d} view\"\n if self.opt.text_emb_all:\n self.embeddings['SD'][d] = self.guidance['SD'].get_all_text_embeds([text])\n else:\n self.embeddings['SD'][d] = self.guidance['SD'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n if self.opt.text_emb_all:\n neg_embedding= self.guidance['SD'].get_all_text_embeds([text_neg])\n else:\n neg_embedding= self.guidance['SD'].get_text_embeds([text_neg])\n self.embeddings['SD'][d] = torch.cat((neg_embedding, self.embeddings['SD'][d]), dim=0)\n\n\n if 'IF' in self.guidance:\n self.embeddings['IF']['default'] = self.guidance['IF'].get_text_embeds([self.opt.text])\n neg_embedding = self.guidance['IF'].get_text_embeds([self.opt.negative])\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.text}, {d} view\"\n self.embeddings['IF'][d] = self.guidance['IF'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n neg_embedding= self.guidance['IF'].get_text_embeds([text_neg])\n self.embeddings['IF'][d] = torch.cat((neg_embedding, self.embeddings['IF'][d]), dim=0)\n \n # if 'clip' in self.guidance:\n # self.embeddings['clip']['text'] = self.guidance['clip'].get_text_embeds(self.opt.text)\n\n if self.opt.cn_text is not None:\n # ipdb.set_trace()\n assert 'CN' in self.guidance\n dir_texts = ['front', 'side', 'back']\n self.embeddings['CN']['default'] = self.guidance['CN'].get_text_embeds([self.opt.cn_text])\n neg_embedding = self.guidance['CN'].get_text_embeds([self.opt.negative])\n self.embeddings['CN']['default'] = 
torch.cat((neg_embedding, self.embeddings['CN']['default']), dim=0)\n\n ## embedding for controlnet -> best quality\n self.embeddings['CN']['CN'] = self.guidance['CN'].get_text_embeds([self.opt.cn_cn_text])\n self.embeddings['CN']['CN'] = torch.cat((neg_embedding, self.embeddings['CN']['CN']), dim=0)\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.cn_text}, {d} view\"\n self.embeddings['CN'][d] = self.guidance['CN'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n neg_embedding= self.guidance['CN'].get_text_embeds([text_neg])\n self.embeddings['CN'][d] = torch.cat((neg_embedding, self.embeddings['CN'][d]), dim=0)\n\n\n if self.opt.images is not None:\n\n h = int(self.opt.known_view_scale * self.opt.h)\n w = int(self.opt.known_view_scale * self.opt.w)\n\n # load processed image and remove edges\n rgbas = []\n rgbas_hw = []\n mask_no_edges = []\n for image in self.opt.images:\n rgba = cv2.cvtColor(cv2.imread(image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA)\n rgbas.append(rgba)\n rgba_hw = cv2.resize(rgba, (w, h), interpolation=cv2.INTER_AREA).astype(np.float32) / 255\n rgbas_hw.append(rgba_hw)\n if self.opt.rm_edge:\n alpha = np.uint8(rgba_hw[..., 3] * 255.)\n dilate = cv2.dilate(alpha, np.ones((self.opt.edge_width, self.opt.edge_width), np.uint8))\n edge = cv2.absdiff(alpha, dilate).astype(np.float32) / 255\n mask_no_edge = rgba_hw[..., 3] > 0.5\n mask_no_edge[edge>self.opt.edge_threshold] = False\n mask_no_edges.append(mask_no_edge)\n rgba_hw = np.stack(rgbas_hw)\n mask = rgba_hw[..., 3] > 0.5\n if len(mask_no_edges) > 0:\n mask_no_edge = np.stack(mask_no_edges)\n else:\n mask_no_edge = mask\n \n # breakpoint() \n # rgb\n rgb_hw = rgba_hw[..., :3] * rgba_hw[..., 3:] + (1 - rgba_hw[..., 3:]) \n self.rgb = torch.from_numpy(rgb_hw).permute(0,3,1,2).contiguous().to(self.device)\n self.mask = torch.from_numpy(mask).to(self.device)\n self.opacity = torch.from_numpy(mask_no_edge).to(self.device).to(torch.float32).unsqueeze(0)\n print(f'[INFO] dataset: load image prompt {self.opt.images} {self.rgb.shape}')\n\n # load depth\n depth_paths = [image.replace('rgba', 'depth') for image in self.opt.images]\n if os.path.exists(depth_paths[0]):\n depths = [cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) for depth_path in depth_paths]\n depth = np.stack([cv2.resize(depth, (w, h), interpolation=cv2.INTER_AREA) for depth in depths])\n self.depth = 1 - torch.from_numpy(depth.astype(np.float32) / 255).to(self.device) ## why use inverse?? 
MiDas predict depth larger value, more close, while nerf should be small value more close\n # self.depth = torch.from_numpy(depth.astype(np.float32) / 255).to(self.device)\n # ipdb.set_trace()\n if len(self.depth.shape) == 4 and self.depth.shape[-1] > 1:\n self.depth = self.depth[..., 0]\n logger.info(f'[WARN] dataset: {depth_paths[0]} has more than one channel, only use the first channel')\n if self.opt.normalize_depth:\n self.depth = nonzero_normalize_depth(self.depth, self.mask)\n save_tensor2image(self.depth, os.path.join(self.workspace, 'depth_resized.jpg'))\n self.depth = self.depth[self.mask]\n print(f'[INFO] dataset: load depth prompt {depth_paths} {self.depth.shape}')\n else:\n self.depth = None\n logger.info(f'[WARN] dataset: {depth_paths[0]} is not found')\n \n # load normal\n normal_paths = [image.replace('rgba', 'normal') for image in self.opt.images]\n if os.path.exists(normal_paths[0]):\n normals = []\n for normal_path in normal_paths:\n normal = cv2.imread(normal_path, cv2.IMREAD_UNCHANGED)\n if normal.shape[-1] == 4:\n normal = cv2.cvtColor(normal, cv2.COLOR_BGRA2RGB)\n normals.append(normal)\n normal = np.stack([cv2.resize(normal, (w, h), interpolation=cv2.INTER_AREA) for normal in normals])\n self.normal = torch.from_numpy(normal.astype(np.float32) / 255).to(self.device)\n save_tensor2image(self.normal, os.path.join(self.workspace, 'normal_resized.jpg'), channel_last=True)\n print(f'[INFO] dataset: load normal prompt {normal_paths} {self.normal.shape}')\n self.normal = self.normal[self.mask]\n else:\n self.normal = None\n logger.info(f'[WARN] dataset: {normal_paths[0]} is not found')\n\n # save for debug\n save_tensor2image(self.rgb, os.path.join(self.workspace, 'rgb_resized.png'), channel_last=False)\n save_tensor2image(self.opacity, os.path.join(self.workspace, 'opacity_resized.png'), channel_last=False)\n\n # encode embeddings for zero123\n if 'zero123' in self.guidance:\n rgba_256 = np.stack([cv2.resize(rgba, (256, 256), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in rgbas])\n rgbs_256 = rgba_256[..., :3] * rgba_256[..., 3:] + (1 - rgba_256[..., 3:])\n rgb_256 = torch.from_numpy(rgbs_256).permute(0,3,1,2).contiguous().to(self.device)\n # import ipdb\n # ipdb.set_trace()\n guidance_embeds = self.guidance['zero123'].get_img_embeds(rgb_256)\n self.embeddings['zero123']['default'] = {\n 'zero123_ws' : self.opt.zero123_ws,\n 'c_crossattn' : guidance_embeds[0],\n 'c_concat' : guidance_embeds[1],\n 'ref_polars' : self.opt.ref_polars,\n 'ref_azimuths' : self.opt.ref_azimuths,\n 'ref_radii' : self.opt.ref_radii,\n }\n\n # if 'clip' in self.guidance:\n # self.embeddings['clip']['image'] = self.guidance['clip'].get_img_embeds(self.rgb)\n # encoder image for clip\n if self.opt.use_clip:\n self.rgb_clip_embed = self.guidance.get_clip_img_embeds(self.rgb)\n # debug.\n scaler = torch.cuda.amp.GradScaler()\n image = torch.randn((1,3,512,512), device=self.device, requires_grad=True)\n with torch.autocast(device_type='cuda', dtype=torch.float16):\n loss = self.guidance.clip_loss(self.rgb_clip_embed, image)\n scaler.scale(loss).backward()\n else:\n self.rgb_clip_embed = None\n\n\n # ------------------------------\n @torch.no_grad()\n def match_known(self, **kwargs):\n self.model.eval()\n data = self.default_view_data\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp'] # [B, 4, 4]\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n ambient_ratio = 1.0\n shading = self.opt.known_shading\n binarize = False\n 
bg_color = self.get_bg_color(\n self.opt.bg_color_known, B*N, rays_o.device)\n\n # add camera noise to avoid grid-like artifect\n # * (1 - self.global_step / self.opt.iters)\n noise_scale = self.opt.known_view_noise_scale\n rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale\n rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True,\n bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize)\n pred_rgb = outputs['image'].reshape(B, H, W, 3).permute(\n 0, 3, 1, 2).contiguous() # [1, 3, H, W]\n pred_mask = outputs['weights_sum'].reshape(B, 1, H, W)\n\n rgb_loss = self.opt.lambda_rgb * \\\n F.mse_loss(pred_rgb*self.opacity,\n self.rgb*self.opacity)\n mask_loss = self.opt.lambda_mask * \\\n F.mse_loss(pred_mask, self.mask.to(torch.float32).unsqueeze(0))\n return pred_rgb, pred_mask, rgb_loss, mask_loss\n\n def get_bg_color(self, bg_type, N, device):\n if bg_type is None:\n return None\n elif isinstance(bg_type, str):\n if bg_type == 'pixelnoise':\n bg_color = torch.rand((N, 3), device=device)\n elif bg_type == 'noise':\n bg_color = torch.rand((1, 3), device=device).repeat(N, 1)\n elif bg_type == 'white':\n bg_color = torch.ones((N, 3), device=device)\n return bg_color\n elif isinstance(bg_type, Tensor):\n bg_color = bg_color.to(device)\n return bg_color\n else:\n raise NotImplementedError(f\"{bg_type} is not implemented\")\n\n def train_step(self, data):\n # perform RGBD loss instead of SDS if is image-conditioned\n do_rgbd_loss = self.opt.images is not None and \\\n ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0))\n\n # override random camera with fixed known camera\n if do_rgbd_loss:\n data = self.default_view_data\n\n # progressively relaxing view range\n if self.opt.progressive_view:\n r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters))\n self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r,\n self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r]\n self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r,\n self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r]\n self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r,\n self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r]\n self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r,\n self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r]\n\n # progressively increase max_level\n if self.opt.progressive_level:\n self.model.max_level = min(1.0, 0.25 + self.global_step / (0.5 * self.opt.iters))\n\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp'] # [B, 4, 4]\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n # When ref_data has B images > opt.batch_size\n if B > self.opt.batch_size:\n # choose batch_size images out of those B images\n choice = torch.randperm(B)[:self.opt.batch_size]\n B = self.opt.batch_size\n rays_o = rays_o[choice]\n rays_d = rays_d[choice]\n mvp = mvp[choice]\n\n if do_rgbd_loss:\n ambient_ratio = 1.0\n shading = 'lambertian' # use lambertian instead of albedo to get normal\n as_latent = False\n binarize = False\n bg_color = self.get_bg_color(\n self.opt.bg_color_known, B*N, rays_o.device)\n\n # add camera noise to avoid grid-like artifact\n if 
self.opt.known_view_noise_scale > 0:\n noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters)\n rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale\n rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale\n\n elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0\n ambient_ratio = 1.0\n shading = 'normal'\n as_latent = True\n binarize = False\n bg_color = None\n\n else:\n if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2\n ambient_ratio = 1.0\n shading = 'normal'\n elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0\n ambient_ratio = 0.1 + 0.9 * random.random()\n shading = 'textureless'\n elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0\n ambient_ratio = 1.0\n shading = 'albedo'\n else:\n # random shading\n ambient_ratio = 0.1 + 0.9 * random.random()\n rand = random.random()\n if rand > 0.8:\n shading = 'textureless'\n else:\n shading = 'lambertian'\n\n as_latent = False\n\n # random weights binarization (like mobile-nerf) [NOT WORKING NOW]\n # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters)\n # binarize = random.random() < binarize_thresh\n binarize = False\n\n # random background\n rand = random.random()\n # ipdb.set_trace()\n if self.opt.bg_radius > 0 and rand > 0.5:\n bg_color = None # use bg_net\n else:\n bg_color = torch.rand(3).to(self.device) # single color random bg\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize)\n pred_depth = outputs['depth'].reshape(B, 1, H, W)\n if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n pred_mask = outputs['weights_sum'].reshape(B, 1, H, W)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n else:\n pred_normal = None \n\n if as_latent:\n # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)\n pred_rgb = torch.cat([outputs['image'], outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W]\n else:\n pred_rgb = outputs['image'].reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() # [B, 3, H, W]\n \n # ipdb.set_trace()\n if 'image_wo_bg' in outputs:\n image_wo_bg = outputs['image_wo_bg'] + (1 - outputs['weights_sum']).unsqueeze(-1) * 1 # B,1,N,3\n if as_latent:\n # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)\n pred_rgb_wobg = torch.cat([image_wo_bg, outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W]\n else:\n pred_rgb_wobg = image_wo_bg.reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() # [B, 3, H, W]\n\n out_dict = {\n 'rgb': pred_rgb,\n 'depth': pred_depth,\n 'mask': pred_mask,\n 'normal': pred_normal,\n 'pred_rgb_wobg': pred_rgb_wobg,\n }\n\n # Loss\n # known view loss\n loss_rgb, loss_mask, loss_normal, loss_depth, loss_sds, loss_if, loss_zero123, loss_clip, loss_entropy, loss_opacity, loss_orient, loss_smooth, loss_smooth2d, loss_smooth3d, loss_mesh_normal, loss_mesh_lap = torch.zeros(16, device=self.device)\n # known view loss\n if do_rgbd_loss:\n gt_mask = self.mask # [B, H, W]\n gt_rgb = self.rgb # [B, 3, H, W]\n gt_opacity = self.opacity # [B, 1, H, W]\n gt_normal = self.normal # [B, H, W, 3]\n gt_depth = self.depth # [B, H, W]\n\n if len(gt_rgb) > self.opt.batch_size:\n 
gt_mask = gt_mask[choice]\n gt_rgb = gt_rgb[choice]\n gt_opacity = gt_opacity[choice]\n gt_normal = gt_normal[choice]\n gt_depth = gt_depth[choice]\n\n # color loss\n loss_rgb = self.opt.lambda_rgb * \\\n F.mse_loss(pred_rgb*gt_opacity, gt_rgb*gt_opacity)\n\n # mask loss\n loss_mask = self.opt.lambda_mask * F.mse_loss(pred_mask, gt_mask.to(torch.float32).unsqueeze(0))\n\n # normal loss\n if self.opt.lambda_normal > 0 and 'normal_image' in outputs and self.normal is not None:\n pred_normal = pred_normal[self.mask]\n lambda_normal = self.opt.lambda_normal * \\\n min(1, self.global_step / self.opt.iters) \n loss_normal = lambda_normal * \\\n (1 - F.cosine_similarity(pred_normal, self.normal).mean())/2\n\n # relative depth loss\n if self.opt.lambda_depth > 0 and self.depth is not None:\n valid_pred_depth = pred_depth[:, 0][self.mask]\n loss_depth = self.opt.lambda_depth * (1 - pearson_corrcoef(valid_pred_depth, self.depth))/2\n \n loss = loss_rgb + loss_mask + loss_normal + loss_depth\n # novel view loss\n else:\n save_guidance_path = os.path.join(self.opt.workspace, 'guidance', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every ==0 else None\n if 'SD' in self.guidance:\n # interpolate text_z\n azimuth = data['azimuth'] # [-180, 180]\n\n # ENHANCE: remove loop to handle batch size > 1\n text_z = [] \n for b in range(azimuth.shape[0]):\n if azimuth[b] >= -90 and azimuth[b] < 90:\n if azimuth[b] >= 0:\n r = 1 - azimuth[b] / 90\n else:\n r = 1 + azimuth[b] / 90\n start_z = self.embeddings['SD']['front']\n end_z = self.embeddings['SD']['side']\n else:\n if azimuth[b] >= 0:\n r = 1 - (azimuth[b] - 90) / 90\n else:\n r = 1 + (azimuth[b] + 90) / 90\n start_z = self.embeddings['SD']['side']\n end_z = self.embeddings['SD']['back']\n text_z.append(r * start_z + (1 - r) * end_z)\n text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)\n # text_z_sds = text_z[:, :-1]\n text_z_sds = text_z \n loss_sds, _ = self.guidance['SD'].train_step(text_z_sds, pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale['SD'], grad_scale=self.opt.lambda_guidance['SD'],\n density=pred_mask if self.opt.gudiance_spatial_weighting else None, \n save_guidance_path=save_guidance_path\n )\n\n \n if 'IF' in self.guidance:\n # interpolate text_z\n azimuth = data['azimuth'] # [-180, 180]\n\n # ENHANCE: remove loop to handle batch size > 1\n # ENHANCE: remove loop to handle batch size > 1\n text_z = [] \n for b in range(azimuth.shape[0]):\n if azimuth[b] >= -90 and azimuth[b] < 90:\n if azimuth[b] >= 0:\n r = 1 - azimuth[b] / 90\n else:\n r = 1 + azimuth[b] / 90\n start_z = self.embeddings['IF']['front']\n end_z = self.embeddings['IF']['side']\n else:\n if azimuth[b] >= 0:\n r = 1 - (azimuth[b] - 90) / 90\n else:\n r = 1 + (azimuth[b] + 90) / 90\n start_z = self.embeddings['IF']['side']\n end_z = self.embeddings['IF']['back']\n text_z.append(r * start_z + (1 - r) * end_z)\n text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)\n text_z = torch.cat(text_z, dim=1).reshape(B, 2, start_z.shape[-2]-1, start_z.shape[-1]).transpose(0, 1).flatten(0, 1)\n loss_if = self.guidance['IF'].train_step(text_z, pred_rgb, guidance_scale=self.opt.guidance_scale['IF'], grad_scale=self.opt.lambda_guidance['IF'])\n\n if 'zero123' in self.guidance:\n\n polar = data['polar']\n azimuth = data['azimuth']\n radius = data['radius']\n\n # ipdb.set_trace()\n input_3dprior = pred_rgb\n loss_zero123 = 
self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale['zero123'],\n as_latent=as_latent, grad_scale=self.opt.lambda_guidance['zero123'], save_guidance_path=save_guidance_path)\n\n if 'clip' in self.guidance:\n\n # empirical, far view should apply smaller CLIP loss\n lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip']\n loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance)\n loss = loss_sds + loss_if + loss_zero123 + loss_clip\n\n # regularizations\n if not self.opt.dmtet:\n\n if self.opt.lambda_opacity > 0: # 0\n loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean()\n\n if self.opt.lambda_entropy > 0: # 1e-3\n lambda_entropy = self.opt.lambda_entropy * \\\n min(1, 2 * self.global_step / self.opt.iters)\n alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5)\n # alphas = alphas ** 2 # skewed entropy, favors 0 over 1\n loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) -\n (1 - alphas) * torch.log2(1 - alphas)).mean()\n\n if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0\n pred_vals = outputs['normal_image'].reshape(B, H, W, 3)\n # total-variation\n loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \\\n (pred_vals[:, :, 1:, :] -\n pred_vals[:, :, :-1, :]).square().mean()\n loss_smooth = self.opt.lambda_normal_smooth * loss_smooth\n\n if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0\n pred_vals = outputs['normal_image'].reshape(\n B, H, W, 3).permute(0, 3, 1, 2).contiguous()\n smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9)\n loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals)\n\n if self.opt.lambda_orient > 0 and 'loss_orient' in outputs: # 1e-2\n loss_orient = self.opt.lambda_orient * outputs['loss_orient']\n \n if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs: # 0\n loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb']\n\n loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + loss_orient + loss_smooth3d\n \n else:\n if self.opt.lambda_mesh_normal > 0:\n loss_mesh_normal = self.opt.lambda_mesh_normal * \\\n outputs['loss_normal']\n\n if self.opt.lambda_mesh_lap > 0:\n loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap']\n loss += loss_mesh_normal + loss_mesh_lap\n\n losses_dict = {\n 'loss': loss.item(),\n 'loss_sds': loss_sds.item(),\n 'loss_if': loss_if.item(),\n 'loss_zero123': loss_zero123.item(),\n 'loss_clip': loss_clip.item(),\n 'loss_rgb': loss_rgb.item(),\n 'loss_mask': loss_mask.item(),\n 'loss_normal': loss_normal.item(),\n 'loss_depth': loss_depth.item(),\n 'loss_opacity': loss_opacity.item(),\n 'loss_entropy': loss_entropy.item(),\n 'loss_smooth': loss_smooth.item(),\n 'loss_smooth2d': loss_smooth2d.item(),\n 'loss_smooth3d': loss_smooth3d.item(),\n 'loss_orient': loss_orient.item(),\n 'loss_mesh_normal': loss_mesh_normal.item(),\n 'loss_mesh_lap': loss_mesh_lap.item(),\n }\n\n \n if 'normal' in out_dict:\n out_dict['normal'] = out_dict['normal'].permute(0, 3, 1, 2).contiguous()\n\n # save for debug purpose\n if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0:\n image_save_path = os.path.join(self.workspace, 'train_debug',)\n 
os.makedirs(image_save_path, exist_ok=True)\n for key, value in out_dict.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n try:\n save_tensor2image(value, os.path.join(image_save_path, f'train_{self.global_step:06d}_{key}.jpg'), channel_last=False) \n except:\n pass\n return loss, losses_dict, out_dict \n\n def post_train_step(self):\n\n # unscale grad before modifying it!\n # ref: https://pytorch.org/docs/stable/notes/amp_examples.html#gradient-clipping\n self.scaler.unscale_(self.optimizer)\n\n # clip grad\n if self.opt.grad_clip >= 0:\n torch.nn.utils.clip_grad_value_(self.model.parameters(), self.opt.grad_clip)\n\n if not self.opt.dmtet and self.opt.backbone == 'grid':\n\n if self.opt.lambda_tv > 0:\n lambda_tv = min(1.0, self.global_step / (0.5 * self.opt.iters)) * self.opt.lambda_tv\n self.model.encoder.grad_total_variation(lambda_tv, None, self.model.bound)\n if self.opt.lambda_wd > 0:\n self.model.encoder.grad_weight_decay(self.opt.lambda_wd)\n\n\n def eval_step(self, data):\n\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp']\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n shading = data['shading'] if 'shading' in data else 'lambertian' \n ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0\n light_d = data['light_d'] if 'light_d' in data else None\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading)\n pred_rgb = outputs['image'].reshape(B, H, W, 3)\n pred_depth = outputs['depth'].reshape(B, H, W, 1)\n if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n else:\n pred_normal = None \n out_dict = {\n shading: pred_rgb,\n 'depth': pred_depth,\n 'normal_image': pred_normal,\n }\n # dummy\n loss = torch.zeros([1], device=pred_rgb.device, dtype=pred_rgb.dtype)\n return out_dict, loss\n\n def test_step(self, data, bg_color=None, perturb=False, shading='lambertian'):\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp']\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n bg_color = self.get_bg_color(bg_color, B*N, rays_o.device)\n\n shading = data['shading'] if 'shading' in data else shading \n ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0\n light_d = data['light_d'] if 'light_d' in data else None\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=perturb, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading, bg_color=bg_color)\n\n pred_rgb = outputs['image'].reshape(B, H, W, 3)\n pred_depth = outputs['depth'].reshape(B, H, W, 1)\n pred_mask = outputs['weights_sum'].reshape(B, H, W, 1)\n # if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n pred_normal = pred_normal * pred_mask + (1.0 - pred_mask) \n else:\n pred_normal = None \n out_dict = {\n shading: pred_rgb,\n 'depth': pred_depth,\n 'normal_image': pred_normal,\n 'mask': pred_mask,\n }\n return out_dict\n\n def save_mesh(self, loader=None, save_path=None):\n\n if save_path is None:\n save_path = os.path.join(self.workspace, 'mesh')\n\n logger.info(f\"==> Saving mesh to {save_path}\")\n\n 
os.makedirs(save_path, exist_ok=True)\n\n self.model.export_mesh(save_path, resolution=self.opt.mcubes_resolution, decimate_target=self.opt.decimate_target)\n\n logger.info(f\"==> Finished saving mesh.\")\n\n ### ------------------------------\n\n def train(self, train_loader, valid_loader, test_loader, max_epochs):\n\n if self.use_tensorboard and self.local_rank == 0:\n self.writer = SummaryWriter(\n os.path.join(self.workspace, \"run\", self.name))\n\n # init from nerf should be performed after Shap-E, since Shap-E will rescale dmtet\n if self.opt.dmtet and (self.opt.init_ckpt and os.path.exists(self.opt.init_ckpt)):\n reset_scale = False if self.opt.use_shape else True\n old_sdf = self.model.get_sdf_from_nerf(reset_scale)\n if not self.opt.tet_mlp:\n self.model.dmtet.init_tet_from_sdf(old_sdf)\n self.test(valid_loader, name=f'init_ckpt', write_video=False, save_each_frame=False, subfolder='check_init')\n else:\n old_sdf = None\n \n if self.opt.use_shape and self.opt.dmtet:\n os.makedirs(os.path.join(self.opt.workspace, 'shape'), exist_ok=True)\n best_loss = torch.inf\n best_idx = 0\n for idx, (sdf, color) in enumerate(zip(self.opt.rpsts, self.opt.colors)):\n self.model.init_tet_from_sdf_color(sdf)\n pred_rgb, pred_mask, rgb_loss, mask_loss = self.match_known()\n best_loss = min(best_loss, mask_loss)\n if best_loss == mask_loss:\n best_idx = idx\n logger.info(f\"==> Current best match shape known sdf idx: {best_idx}\")\n save_tensor2image(pred_mask, os.path.join(self.opt.workspace, 'shape', f\"match_shape_known_{idx}_rgb.jpg\"), channel_last=False)\n self.test(valid_loader, name=f'idx_{idx}', write_video=False, save_each_frame=False, subfolder='check_init')\n \n sdf = self.opt.rpsts[best_idx]\n self.model.init_tet_from_sdf_color(sdf, self.opt.colors[best_idx])\n self.test(valid_loader, name=f'shape_only', write_video=False, save_each_frame=False, subfolder='check_init')\n\n # Enable mixture model\n if self.opt.base_mesh:\n logger.info(f\"==> Enable mixture model with base mesh {self.opt.base_mesh}\")\n mesh_sdf = self.model.dmtet.get_sdf_from_mesh(self.opt.base_mesh)\n sdf = (mesh_sdf.clamp(0, 1) + sdf.clamp(0,1) ).clamp(0, 1)\n\n if old_sdf is not None:\n sdf = (sdf.clamp(0, 1) + old_sdf.clamp(0, 1)).clamp(0, 1)\n\n self.model.init_tet_from_sdf_color(sdf, self.opt.colors[best_idx])\n self.test(valid_loader, name=f'shape_merge', write_video=False, save_each_frame=False, subfolder='check_init')\n\n del best_loss, best_idx, pred_rgb, pred_mask, rgb_loss, mask_loss\n self.opt.rpsts = None\n gc.collect()\n torch.cuda.empty_cache()\n\n\n start_t = time.time()\n\n for epoch in range(self.epoch + 1, max_epochs + 1):\n self.epoch = epoch\n\n self.train_one_epoch(train_loader, max_epochs)\n\n if self.workspace is not None and self.local_rank == 0:\n if self.epoch % self.opt.save_interval == 0:\n self.save_checkpoint(full=True, best=False)\n\n if self.epoch % self.opt.eval_interval == 0:\n self.evaluate_one_epoch(valid_loader) \n # best_save = True if self.epoch % self.opt.save_interval else False\n self.save_checkpoint(full=False, best=True)\n\n if self.epoch % self.opt.test_interval == 0 or self.epoch == max_epochs:\n self.test(test_loader, img_folder='images' if self.epoch == max_epochs else f'images_ep{self.epoch:04d}')\n\n end_t = time.time()\n\n self.total_train_t = end_t - start_t + self.total_train_t\n\n logger.info(f\"[INFO] training takes {(self.total_train_t)/ 60:.4f} minutes.\")\n\n if self.use_tensorboard and self.local_rank == 0:\n self.writer.close()\n\n def evaluate(self, loader, 
name=None):\n self.use_tensorboard, use_tensorboard = False, self.use_tensorboard\n self.evaluate_one_epoch(loader, name)\n self.use_tensorboard = use_tensorboard\n\n def test(self, loader, save_path=None, name=None, \n write_video=True, save_each_frame=True, shading='lambertian', \n subfolder='results', img_folder='images'\n ):\n\n if save_path is None:\n save_path = os.path.join(self.workspace, subfolder)\n image_save_path = os.path.join(self.workspace, subfolder, img_folder)\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n os.makedirs(save_path, exist_ok=True)\n os.makedirs(image_save_path, exist_ok=True)\n\n logger.info(f\"==> Start Test, saving {shading} results to {save_path}\")\n\n pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]')\n self.model.eval()\n\n all_outputs = {} \n with torch.no_grad():\n for i, data in enumerate(loader):\n # if i > 20:\n # break\n with torch.cuda.amp.autocast(enabled=self.fp16):\n outputs = self.test_step(data, bg_color=self.opt.bg_color_test, shading=shading)\n for key, value in outputs.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n if save_each_frame:\n save_tensor2image(value, os.path.join(image_save_path, f'{name}_{i:04d}_{key}.jpg'), channel_last=True) \n if key not in all_outputs.keys():\n all_outputs[key] = []\n all_outputs[key].append(value)\n pbar.update(loader.batch_size)\n\n for key, value in all_outputs.items():\n all_outputs[key] = torch.cat(value, dim=0) # B,H,W,C, B is all the pose results\n # if video -> B,F,H,W,C \n \n if write_video:\n for key, value in all_outputs.items():\n # current version torchvision does not support writing a single-channel video\n # torchvision.io.write_video(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key].detach().cpu(), fps=25)\n # imageio.mimwrite(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key].detach().cpu().numpy(), fps=25, quality=8, macro_block_size=1)\n one_video_save(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key])\n\n for key, value in all_outputs.items():\n save_tensor2image(value, os.path.join(save_path, f'{name}_{key}_grid.jpg'), channel_last=True)\n logger.info(f\"==> Finished Test.\")\n\n # [GUI] train text step.\n def train_gui(self, train_loader, step=16):\n\n self.model.train()\n\n total_loss = torch.tensor([0], dtype=torch.float32, device=self.device)\n\n loader = iter(train_loader)\n\n for _ in range(step):\n\n # mimic an infinite loop dataloader (in case the total dataset is smaller than step)\n try:\n data = next(loader)\n except StopIteration:\n loader = iter(train_loader)\n data = next(loader)\n\n # update grid every 16 steps\n if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:\n with torch.cuda.amp.autocast(enabled=self.fp16):\n self.model.update_extra_state()\n\n self.global_step += 1\n\n self.optimizer.zero_grad()\n\n with torch.cuda.amp.autocast(enabled=self.fp16):\n loss, loss_dicts, outputs = self.train_step(data)\n\n self.scaler.scale(loss).backward()\n self.post_train_step()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n if self.scheduler_update_every_step:\n self.lr_scheduler.step()\n\n self.loss_meter.update(loss_dicts)\n \n if self.ema is not None:\n self.ema.update()\n\n average_loss = self.loss_meter.meters['loss'].avg\n\n if not self.scheduler_update_every_step:\n if 
isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(average_loss)\n else:\n self.lr_scheduler.step()\n\n outputs = {\n 'loss': average_loss,\n 'lr': self.optimizer.param_groups[0]['lr'],\n }\n\n return outputs\n\n\n # [GUI] test on a single image\n def test_gui(self, pose, intrinsics, mvp, W, H, bg_color=None, spp=1, downscale=1, light_d=None, ambient_ratio=1.0, shading='albedo'):\n\n # render resolution (may need downscale to for better frame rate)\n rH = int(H * downscale)\n rW = int(W * downscale)\n intrinsics = intrinsics * downscale\n\n pose = torch.from_numpy(pose).unsqueeze(0).to(self.device)\n mvp = torch.from_numpy(mvp).unsqueeze(0).to(self.device)\n\n rays = get_rays(pose, intrinsics, rH, rW, -1)\n\n # from degree theta/phi to 3D normalized vec\n light_d = np.deg2rad(light_d)\n light_d = np.array([\n np.sin(light_d[0]) * np.sin(light_d[1]),\n np.cos(light_d[0]),\n np.sin(light_d[0]) * np.cos(light_d[1]),\n ], dtype=np.float32)\n light_d = torch.from_numpy(light_d).to(self.device)\n\n data = {\n 'rays_o': rays['rays_o'],\n 'rays_d': rays['rays_d'],\n 'mvp': mvp,\n 'H': rH,\n 'W': rW,\n 'light_d': light_d,\n 'ambient_ratio': ambient_ratio,\n 'shading': shading,\n }\n\n self.model.eval()\n\n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n with torch.no_grad():\n with torch.cuda.amp.autocast(enabled=self.fp16):\n # here spp is used as perturb random seed!\n outputs = self.test_step(\n data, bg_color=bg_color, perturb=False if spp == 1 else spp)\n\n if self.ema is not None:\n self.ema.restore()\n\n # interpolation to the original resolution\n if downscale != 1:\n # have to permute twice with torch...\n outputs[shading] = F.interpolate(outputs[shading].permute(0, 3, 1, 2), size=(\n H, W), mode='nearest').permute(0, 2, 3, 1).contiguous()\n outputs['depth'] = F.interpolate(outputs['depth'].unsqueeze(\n 1), size=(H, W), mode='nearest').squeeze(1)\n\n if outputs['normal_imagea'] is not None:\n outputs['normal_image'] = F.interpolate(outputs['normal_image'].unsqueeze(1), size=(H, W), mode='nearest').squeeze(1)\n\n return outputs\n\n def train_one_epoch(self, loader, max_epochs):\n logger.info(f\"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Start Training {self.workspace} Epoch {self.epoch}/{max_epochs}, lr={self.optimizer.param_groups[0]['lr']:.6f} ...\")\n\n if self.local_rank == 0 and self.report_metric_at_train:\n for metric in self.metrics:\n metric.clear()\n\n self.model.train()\n\n # distributedSampler: must call set_epoch() to shuffle indices across multiple epochs\n # ref: https://pytorch.org/docs/stable/data.html\n if self.world_size > 1:\n loader.sampler.set_epoch(self.epoch)\n\n self.local_step = 0\n\n for data in loader:\n\n # update grid every 16 steps\n if (self.model.cuda_ray or self.model.taichi_ray) and self.global_step % self.opt.update_extra_interval == 0:\n with torch.cuda.amp.autocast(enabled=self.fp16):\n self.model.update_extra_state()\n \n # Update grid\n if self.opt.grid_levels_mask > 0:\n if self.global_step > self.opt.grid_levels_mask_iters:\n self.model.grid_levels_mask = 0\n else:\n self.model.grid_levels_mask = self.opt.grid_levels_mask\n\n self.local_step += 1\n self.global_step += 1\n\n ## update optimizer\n if self.global_step == self.opt.lr_time_iter:\n # ipdb.set_trace()\n grad_vars = self.model.get_params(self.opt.lr, self.opt.lr_scale_time)\n self.optimizer = torch.optim.Adam(\n grad_vars, betas=(0.9, 0.99), eps=1e-15\n )\n self.lr_scheduler.optimizer = self.optimizer\n\n 
self.optimizer.zero_grad()\n # ipdb.set_trace()\n with torch.cuda.amp.autocast(enabled=self.fp16):\n loss, losses_dict, outputs = self.train_step(data)\n\n # hooked grad clipping for RGB space\n if self.opt.grad_clip_rgb >= 0:\n def _hook(grad):\n if self.opt.fp16:\n # correctly handle the scale\n grad_scale = self.scaler._get_scale_async()\n return grad.clamp(grad_scale * -self.opt.grad_clip_rgb, grad_scale * self.opt.grad_clip_rgb)\n else:\n return grad.clamp(-self.opt.grad_clip_rgb, self.opt.grad_clip_rgb)\n outputs['rgb'].register_hook(_hook)\n # if (self.global_step <= self.opt.known_iters or self.global_step % self.opt.known_view_interval == 0) and self.opt.image is not None and self.opt.joint_known_unknown and known_rgbs is not None:\n # known_rgbs.register_hook(_hook)\n # pred_rgbs.retain_grad()\n\n self.scaler.scale(loss).backward()\n # ipdb.set_trace()\n self.post_train_step()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n if self.scheduler_update_every_step:\n self.lr_scheduler.step()\n \n\n self.loss_meter.update(losses_dict)\n\n # last_losses_dict = losses_dict\n # last_grad = [layer.weight.grad.data.detach() for layer in self.model.deformation_net.net]\n\n if self.local_rank == 0:\n # if self.report_metric_at_train:\n # for metric in self.metrics:\n # metric.update(preds, truths)\n\n if self.use_tensorboard:\n\n for key, val in losses_dict.items():\n self.writer.add_scalar(\n f\"train/{key}\", val, self.global_step) \n\n self.writer.add_scalar(\n \"train/lr\", self.optimizer.param_groups[0]['lr'], self.global_step)\n\n if self.global_step % self.opt.log_every == 0:\n strings = f\"==> Train [Step] {self.global_step}/{self.opt.iters}\"\n for key, value in losses_dict.items():\n strings += f\", {key}={value:.4f}\"\n logger.info(strings)\n strings = f\"==> Train [Avg] {self.global_step}/{self.opt.iters}\"\n for key in self.loss_meter.meters.keys():\n strings += f\", {key}={self.loss_meter.meters[key].avg:.4f}\"\n logger.info(strings)\n\n if self.ema is not None:\n self.ema.update()\n \n average_loss = self.loss_meter.meters['loss'].avg\n self.stats[\"loss\"].append(average_loss)\n\n if self.local_rank == 0:\n # pbar.close()\n if self.report_metric_at_train:\n for metric in self.metrics:\n logger.info(metric.report(), style=\"red\")\n if self.use_tensorboard:\n metric.write(self.writer, self.epoch, prefix=\"train\")\n metric.clear()\n\n if not self.scheduler_update_every_step:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(average_loss)\n else:\n self.lr_scheduler.step()\n\n\n # Visualize Training\n if self.local_rank == 0:\n # save image\n save_path = os.path.join(\n self.workspace, 'training')\n os.makedirs(save_path, exist_ok=True)\n name = f'train_{self.name}_ep{self.epoch:04d}'\n for key, value in outputs.items():\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n save_tensor2image(value, os.path.join(save_path, f'{name}_{key}.jpg'), channel_last=False) \n gpu_mem = get_GPU_mem()[0]\n logger.info(f\"==> [Finished Epoch {self.epoch}/{max_epochs}. 
GPU={gpu_mem:.1f}GB.\")\n\n def evaluate_one_epoch(self, loader, name=None):\n logger.info(f\"++> Evaluate {self.workspace} at epoch {self.epoch} ...\")\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n total_loss = 0\n if self.local_rank == 0:\n for metric in self.metrics:\n metric.clear()\n\n self.model.eval()\n \n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n if self.local_rank == 0:\n pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]')\n\n with torch.no_grad():\n self.local_step = 0\n \n all_outputs = {} \n for data in loader:\n self.local_step += 1\n\n with torch.cuda.amp.autocast(enabled=self.fp16):\n outputs, loss = self.eval_step(data)\n\n # all_gather/reduce the statistics (NCCL only support all_*)\n if self.world_size > 1:\n dist.all_reduce(loss, op=dist.ReduceOp.SUM)\n loss = loss / self.world_size\n \n for key, value in outputs.items():\n if value is not None:\n dist.all_gather(outputs[key])\n outputs[key] = torch.cat(outputs[key], dim=0)\n \n loss_val = loss.item()\n total_loss += loss_val\n\n # only rank = 0 will perform evaluation.\n if self.local_rank == 0:\n\n # save image\n save_path = os.path.join(\n self.workspace, 'validation')\n\n # logger.info(f\"==> Saving validation image to {save_path}\")\n os.makedirs(save_path, exist_ok=True)\n\n for key, value in outputs.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n # save_tensor2image(value, os.path.join(save_path, f'{name}_{self.local_step:04d}_{key}.jpg')) \n if key not in all_outputs.keys():\n all_outputs[key] = []\n all_outputs[key].append(value)\n\n pbar.set_description(\n f\"loss={loss_val:.4f} ({total_loss/self.local_step:.4f})\")\n pbar.update(loader.batch_size)\n\n\n average_loss = total_loss / self.local_step\n self.stats[\"valid_loss\"].append(average_loss)\n\n if self.local_rank == 0:\n pbar.close()\n if not self.use_loss_as_metric and len(self.metrics) > 0:\n result = self.metrics[0].measure()\n self.stats[\"results\"].append(result if self.best_mode == 'min' else - result) # if max mode, use -result\n else:\n self.stats[\"results\"].append(average_loss) # if no metric, choose best by min loss\n\n for metric in self.metrics:\n logger.info(metric.report(), style=\"blue\")\n if self.use_tensorboard:\n metric.write(self.writer, self.epoch, prefix=\"evaluate\")\n metric.clear()\n \n for key, value in all_outputs.items():\n all_outputs[key] = torch.cat(value, dim=0)\n save_tensor2image(all_outputs[key], os.path.join(save_path, f'{name}_{key}.jpg'), channel_last=True)\n if self.ema is not None:\n self.ema.restore()\n\n logger.info(f\"++> Evaluate epoch {self.epoch} Finished.\")\n\n def save_checkpoint(self, name=None, full=False, best=False):\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n state = {\n 'epoch': self.epoch,\n 'global_step': self.global_step,\n 'stats': self.stats,\n }\n\n if self.model.cuda_ray:\n state['mean_density'] = self.model.mean_density\n\n if self.opt.dmtet:\n state['tet_scale'] = self.model.dmtet.tet_scale.cpu().numpy()\n\n if full:\n state['optimizer'] = self.optimizer.state_dict()\n state['lr_scheduler'] = self.lr_scheduler.state_dict()\n state['scaler'] = self.scaler.state_dict()\n if self.ema is not None:\n state['ema'] = self.ema.state_dict()\n\n if not best:\n\n state['model'] = self.model.state_dict()\n\n file_path = f\"{name}.pth\"\n\n 
self.stats[\"checkpoints\"].append(file_path)\n\n if len(self.stats[\"checkpoints\"]) > self.max_keep_ckpt:\n old_ckpt = os.path.join(\n self.opt.ckpt_path, self.stats[\"checkpoints\"].pop(0))\n if os.path.exists(old_ckpt):\n os.remove(old_ckpt)\n\n torch.save(state, os.path.join(self.opt.ckpt_path, file_path))\n\n \n \n\n else:\n if len(self.stats[\"results\"]) > 0:\n # always save best since loss cannot reflect performance.\n if True:\n # logger.info(f\"[INFO] New best result: {self.stats['best_result']} --> {self.stats['results'][-1]}\")\n # self.stats[\"best_result\"] = self.stats[\"results\"][-1]\n\n # save ema results\n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n state['model'] = self.model.state_dict()\n\n if self.ema is not None:\n self.ema.restore()\n\n torch.save(state, self.opt.best_path)\n\n\n else:\n logger.info(\n f\"[WARN] no evaluated results found, skip saving best checkpoint.\")\n\n def load_checkpoint(self, checkpoint=None, model_only=False):\n if checkpoint is None:\n checkpoint_list = sorted(glob.glob(f'{self.opt.ckpt_path}/*.pth'))\n if checkpoint_list:\n checkpoint = checkpoint_list[-1]\n logger.info(f\"[INFO] Latest checkpoint is {checkpoint}\")\n else:\n logger.info(\n \"[WARN] No checkpoint found, model randomly initialized.\")\n return\n\n checkpoint_dict = torch.load(checkpoint, map_location=self.device)\n\n if 'model' not in checkpoint_dict:\n self.model.load_state_dict(checkpoint_dict)\n logger.info(\"[INFO] loaded model.\")\n return\n\n missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint_dict['model'], strict=False)\n logger.info(\"[INFO] loaded model.\")\n if len(missing_keys) > 0:\n logger.info(f\"[WARN] missing keys: {missing_keys}\")\n if len(unexpected_keys) > 0:\n logger.info(f\"[WARN] unexpected keys: {unexpected_keys}\")\n\n if self.ema is not None and 'ema' in checkpoint_dict:\n try:\n self.ema.load_state_dict(checkpoint_dict['ema'])\n logger.info(\"[INFO] loaded EMA.\")\n except:\n logger.info(\"[WARN] failed to loaded EMA.\")\n\n if self.model.cuda_ray:\n if 'mean_density' in checkpoint_dict:\n self.model.mean_density = checkpoint_dict['mean_density']\n\n if self.opt.dmtet:\n if 'tet_scale' in checkpoint_dict:\n new_scale = torch.from_numpy(\n checkpoint_dict['tet_scale']).to(self.device)\n self.model.dmtet.verts *= new_scale / self.model.dmtet.tet_scale\n self.model.dmtet.tet_scale = new_scale\n # self.model.init_tet() \n if model_only:\n return\n\n self.stats = checkpoint_dict['stats']\n self.epoch = checkpoint_dict['epoch']\n self.global_step = checkpoint_dict['global_step']\n logger.info(\n f\"[INFO] load at epoch {self.epoch}, global step {self.global_step}\")\n\n if self.optimizer and 'optimizer' in checkpoint_dict:\n try:\n self.optimizer.load_state_dict(checkpoint_dict['optimizer'])\n logger.info(\"[INFO] loaded optimizer.\")\n except:\n logger.info(\"[WARN] Failed to load optimizer.\")\n\n if self.lr_scheduler and 'lr_scheduler' in checkpoint_dict:\n try:\n self.lr_scheduler.load_state_dict(checkpoint_dict['lr_scheduler'])\n logger.info(\"[INFO] loaded scheduler.\")\n except:\n logger.info(\"[WARN] Failed to load scheduler.\")\n\n if self.scaler and 'scaler' in checkpoint_dict:\n try:\n self.scaler.load_state_dict(checkpoint_dict['scaler'])\n logger.info(\"[INFO] loaded scaler.\")\n except:\n logger.info(\"[WARN] Failed to load scaler.\")" }, { "identifier": "custom_meshgrid", "path": "nerf/utils.py", "snippet": "def custom_meshgrid(*args):\n # ref: 
https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshgrid(*args)\n else:\n return torch.meshgrid(*args, indexing='ij')" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
import os
import glob
import tqdm
import random
import logging
import gc
import numpy as np
import imageio, imageio_ffmpeg
import time
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributed as dist
import torchvision.transforms.functional as TF
import ipdb
import copy
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from torchmetrics.functional import pearson_corrcoef
from nerf.utils import save_tensor2image, nonzero_normalize_depth, Trainer
from einops import rearrange
from nerf.utils import custom_meshgrid, safe_normalize
from dnerf.network_4dgrid import NeRFNetwork
19,451
'rgb': pred_rgb, 'depth': pred_depth, 'normal_image': pred_normal, 'mask': pred_mask, } return out_dict def train_step(self, data): # perform RGBD loss instead of SDS if is image-conditioned do_rgbd_loss = self.opt.images is not None and \ ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0)) # ipdb.set_trace() # override random camera with fixed known camera if do_rgbd_loss: data = self.default_view_data # progressively relaxing view range if self.opt.progressive_view: r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters)) self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] # progressively increase max_level if self.opt.progressive_level: self.model.max_level = min(1.0, 0.25 + self.global_step / (0.5 * self.opt.iters)) rays_o = data['rays_o'] # [B, N, 3] # B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] # B,F,N,3 mvp = data['mvp'] # [B, 4, 4] / [B,F,4,4] time = data['time'] # [B,T] use_dynamic_cam = (rays_o.ndim == 4) B = rays_o.size(0) # ipdb.set_trace() N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2) H, W = data['H'], data['W'] # ipdb.set_trace() start_from_zero = data.get('start_from_zero', True) if start_from_zero: assert time[0,0] == 0 # When ref_data has B images > opt.batch_size if B > self.opt.batch_size: # choose batch_size images out of those B images choice = torch.randperm(B)[:self.opt.batch_size] B = self.opt.batch_size rays_o = rays_o[choice] rays_d = rays_d[choice] mvp = mvp[choice] if do_rgbd_loss: ambient_ratio = 1.0 shading = 'lambertian' # use lambertian instead of albedo to get normal as_latent = False binarize = False bg_color = self.get_bg_color( self.opt.bg_color_known, B*N, rays_o.device) # add camera noise to avoid grid-like artifact if self.opt.known_view_noise_scale > 0: noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0 ambient_ratio = 1.0 shading = 'normal' as_latent = True binarize = False bg_color = None else: if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2 ambient_ratio = 1.0 shading = 'normal' elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0 ambient_ratio = 0.1 + 0.9 * random.random() shading = 'textureless' elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0 ambient_ratio = 1.0 shading = 'albedo' else: # random shading ambient_ratio = 0.1 + 0.9 * random.random() rand = random.random() if rand < self.opt.textureless_rate: # 0.2 shading = 'textureless' else: shading = 'lambertian' as_latent = False # random weights binarization (like mobile-nerf) [NOT WORKING NOW] # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) # binarize = random.random() < binarize_thresh binarize 
= False # random background rand = random.random() # ipdb.set_trace() if self.opt.bg_radius > 0 and rand > 0.5: bg_color = None # use bg_net else: bg_color = torch.rand(3).to(self.device) # single color random bg ## NOTE if bg_radius < 0 -> the way magic123 use # The bg color is always random video_outputs = [] num_frames = time.size(1)
logger = logging.getLogger(__name__) class DTrainer(Trainer): def __init__(self, argv, name, opt, model, guidance, criterion=None, optimizer=None, ema_decay=None, lr_scheduler=None, metrics=[], local_rank=0, world_size=1, device=None, mute=False, fp16=False, max_keep_ckpt=1, workspace='workspace', best_mode='min', use_loss_as_metric=True, report_metric_at_train=False, use_checkpoint="latest", use_tensorboard=True, scheduler_update_every_step=False, **kwargs): super().__init__(argv, name, opt, model, guidance, criterion, optimizer, ema_decay, lr_scheduler, metrics, local_rank, world_size, device, mute, fp16, max_keep_ckpt, workspace, best_mode, use_loss_as_metric, report_metric_at_train, use_checkpoint, use_tensorboard, scheduler_update_every_step, **kwargs) self.rgbd_scale = opt.get("rgbd_scale", 1.0) self.fix_dynamic = opt.fix_dynamic if self.fix_dynamic: assert opt.backbone == 'grid4d' self.dynamic_model = NeRFNetwork(opt) # ipdb.set_trace() model_state_dict = self.model.state_dict() self.dynamic_model.load_state_dict(model_state_dict) for p in self.dynamic_model.parameters(): p.requires_grad = False self.dynamic_model.train() self.dynamic_model.to(opt.device) @torch.no_grad() def eval_static_step(self, data, shading): rays_o = data['rays_o'] # [B, N, 3] / B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] / B,F,N,3 mvp = data['mvp'] # B,4,4 / B,F,4,4 if rays_o.ndim == 4: rays_o = rays_o[:, 0] rays_d = rays_d[:, 0] mvp = mvp[:, 0] B, N = rays_o.shape[:2] H, W = data['H'], data['W'] ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 light_d = data['light_d'] if 'light_d' in data else None # ipdb.set_trace() outputs = self.static_model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading) pred_rgb = outputs['image'].reshape(B, H, W, 3) pred_depth = outputs['depth'].reshape(B, H, W, 1) if self.opt.normalize_depth: pred_depth = nonzero_normalize_depth(pred_depth) if 'normal_image' in outputs: # eval mode no normal image pred_normal = outputs['normal_image'].reshape(B, H, W, 3) else: pred_normal = None pred_mask = outputs['weights_sum'].reshape(B, H, W, 1) out_dict = { 'rgb': pred_rgb, 'depth': pred_depth, 'normal_image': pred_normal, 'mask': pred_mask, } return out_dict def train_step(self, data): # perform RGBD loss instead of SDS if is image-conditioned do_rgbd_loss = self.opt.images is not None and \ ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0)) # ipdb.set_trace() # override random camera with fixed known camera if do_rgbd_loss: data = self.default_view_data # progressively relaxing view range if self.opt.progressive_view: r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters)) self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] # progressively increase max_level if self.opt.progressive_level: self.model.max_level = min(1.0, 0.25 + 
self.global_step / (0.5 * self.opt.iters)) rays_o = data['rays_o'] # [B, N, 3] # B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] # B,F,N,3 mvp = data['mvp'] # [B, 4, 4] / [B,F,4,4] time = data['time'] # [B,T] use_dynamic_cam = (rays_o.ndim == 4) B = rays_o.size(0) # ipdb.set_trace() N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2) H, W = data['H'], data['W'] # ipdb.set_trace() start_from_zero = data.get('start_from_zero', True) if start_from_zero: assert time[0,0] == 0 # When ref_data has B images > opt.batch_size if B > self.opt.batch_size: # choose batch_size images out of those B images choice = torch.randperm(B)[:self.opt.batch_size] B = self.opt.batch_size rays_o = rays_o[choice] rays_d = rays_d[choice] mvp = mvp[choice] if do_rgbd_loss: ambient_ratio = 1.0 shading = 'lambertian' # use lambertian instead of albedo to get normal as_latent = False binarize = False bg_color = self.get_bg_color( self.opt.bg_color_known, B*N, rays_o.device) # add camera noise to avoid grid-like artifact if self.opt.known_view_noise_scale > 0: noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0 ambient_ratio = 1.0 shading = 'normal' as_latent = True binarize = False bg_color = None else: if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2 ambient_ratio = 1.0 shading = 'normal' elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0 ambient_ratio = 0.1 + 0.9 * random.random() shading = 'textureless' elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0 ambient_ratio = 1.0 shading = 'albedo' else: # random shading ambient_ratio = 0.1 + 0.9 * random.random() rand = random.random() if rand < self.opt.textureless_rate: # 0.2 shading = 'textureless' else: shading = 'lambertian' as_latent = False # random weights binarization (like mobile-nerf) [NOT WORKING NOW] # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) # binarize = random.random() < binarize_thresh binarize = False # random background rand = random.random() # ipdb.set_trace() if self.opt.bg_radius > 0 and rand > 0.5: bg_color = None # use bg_net else: bg_color = torch.rand(3).to(self.device) # single color random bg ## NOTE if bg_radius < 0 -> the way magic123 use # The bg color is always random video_outputs = [] num_frames = time.size(1)
light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device))
4
2023-11-23 10:34:08+00:00
24k
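The gold next line of the record above relies on the `safe_normalize` helper that was retrieved into its context (from `nerf/utils.py`). As a quick, hedged illustration of what that completion does, here is a minimal self-contained sketch; the `rays_o` tensor and its shape are invented for the example and only stand in for `data['rays_o']` in the trainer code.

import torch

def safe_normalize(x, eps=1e-20):
    # normalize along the last dim; clamp the squared norm so zero vectors don't divide by zero
    return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))

# hypothetical stand-in for data['rays_o'] in the trainer above: B batches x N rays x 3
rays_o = torch.randn(2, 4096, 3)

# the gold next line from the record: jitter the origins with one random offset,
# then renormalize to get a unit light direction per ray
light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device))

print(light_d.shape)                       # torch.Size([2, 4096, 3])
print(light_d.norm(dim=-1).mean().item())  # ~1.0

Adding a single random 3-vector to every ray origin and renormalizing gives a randomized unit light direction near the camera, which the shading branches in the cropped code presumably consume through the renderer's `light_d` argument (as in `eval_static_step` above).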
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid 
only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise 
ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n \n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n # density = self.density_act(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = 
self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
15,281
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        shape_init_mesh_up: str = "+z"
        shape_init_mesh_front: str = "+x"
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self, *args, **kwargs) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        if self.cfg.shape_init is None and not self.cfg.force_shape_init:
            return

        # do not initialize shape if weights are provided
        if self.cfg.weights is not None and not self.cfg.force_shape_init:
            return

        get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
        assert isinstance(self.cfg.shape_init, str)
        if self.cfg.shape_init == "ellipsoid":
            assert (
                isinstance(self.cfg.shape_init_params, Sized)
                and len(self.cfg.shape_init_params) == 3
            )
            size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return ((points_rand / size) ** 2).sum(
                    dim=-1, keepdim=True
                ).sqrt() - 1.0  # pseudo signed distance of an ellipsoid

            get_gt_sdf = func
        elif self.cfg.shape_init == "sphere":
            assert isinstance(self.cfg.shape_init_params, float)
            radius = self.cfg.shape_init_params

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius

            get_gt_sdf = func
        elif self.cfg.shape_init.startswith("mesh:"):
            assert isinstance(self.cfg.shape_init_params, float)
            mesh_path = self.cfg.shape_init[5:]
            if not os.path.exists(mesh_path):
                raise ValueError(f"Mesh file {mesh_path} does not exist.")

            mesh = trimesh.load(mesh_path)

            # move to center
            centroid = mesh.vertices.mean(0)
            mesh.vertices = mesh.vertices - centroid

            # align to up-z and front-x
            dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
            dir2vec = {
                "+x": np.array([1, 0, 0]),
                "+y": np.array([0, 1, 0]),
                "+z": np.array([0, 0, 1]),
                "-x": np.array([-1, 0, 0]),
                "-y": np.array([0, -1, 0]),
                "-z": np.array([0, 0, -1]),
            }
            if (
                self.cfg.shape_init_mesh_up not in dirs
                or self.cfg.shape_init_mesh_front not in dirs
            ):
                raise ValueError(
                    f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
                )
            if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
                raise ValueError(
                    "shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
                )
            z_, x_ = (
                dir2vec[self.cfg.shape_init_mesh_up],
                dir2vec[self.cfg.shape_init_mesh_front],
            )
            y_ = np.cross(z_, x_)
            std2mesh = np.stack([x_, y_, z_], axis=0).T
            mesh2std = np.linalg.inv(std2mesh)

            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative sign here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func
        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )

        sdf_gt = get_gt_sdf(
            scale_tensor(
                self.isosurface_helper.grid_vertices,
                self.isosurface_helper.points_range,
                self.isosurface_bbox,
            )
        )
        self.sdf.data = sdf_gt

        # explicit broadcast to ensure param consistency across ranks
        for param in self.parameters():
            broadcast(param, src=0)

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
        )
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert (
            output_normal == False
        ), f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
        points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        features = self.feature_network(enc).view(
            *points.shape[:-1], self.cfg.n_feature_dims
        )
        return {"features": features}

    @staticmethod
    @torch.no_grad()
    def create_from(
        other: BaseGeometry,
        cfg: Optional[Union[dict, DictConfig]] = None,
        copy_net: bool = True,
        **kwargs,
    ) -> "TetrahedraSDFGrid":
        if isinstance(other, TetrahedraSDFGrid):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
            instance.isosurface_bbox = other.isosurface_bbox.clone()
            instance.sdf.data = other.sdf.data.clone()
            if (
                instance.cfg.isosurface_deformable_grid
                and other.cfg.isosurface_deformable_grid
            ):
                assert (
                    instance.deformation is not None and other.deformation is not None
                )
                instance.deformation.data = other.deformation.data.clone()
            if (
                not instance.cfg.geometry_only
                and not other.cfg.geometry_only
                and copy_net
            ):
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-27 23:39:49+00:00
24k
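The TetrahedraSDFGrid record above initializes its SDF grid from analytic shapes before optimization. As a minimal, self-contained sketch (not part of the dataset record), the snippet below reproduces the two analytic initializers from `initialize_shape` -- the ellipsoid pseudo-SDF and the exact sphere SDF -- as standalone functions; the query points and semi-axis values at the end are arbitrary illustrative choices.

import torch


def ellipsoid_pseudo_sdf(points: torch.Tensor, size: torch.Tensor) -> torch.Tensor:
    # Pseudo signed distance of an axis-aligned ellipsoid with semi-axes `size`
    # (the "ellipsoid" branch above): negative inside, positive outside, zero on
    # the surface, but not a true metric distance away from the surface.
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0


def sphere_sdf(points: torch.Tensor, radius: float) -> torch.Tensor:
    # Exact signed distance of an origin-centered sphere (the "sphere" branch above).
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius


if __name__ == "__main__":
    # Arbitrary query points: the origin (inside), a surface point, and a point outside.
    pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 3.0, 0.0]])
    print(ellipsoid_pseudo_sdf(pts, torch.tensor([1.0, 2.0, 0.5])))  # shape [3, 1]
    print(sphere_sdf(pts, radius=1.0))  # shape [3, 1]

In `initialize_shape`, the analogous closure is evaluated at the tetrahedral grid vertices (after `scale_tensor` maps them from the helper's normalized range into `isosurface_bbox`) and the result is written into `self.sdf.data` as the starting SDF values.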
abdulhaim/LMRL-Gym
llm_rl_scripts/text_nav/ilql/train_ilql.py
[ { "identifier": "ilql_loss", "path": "LLM_RL/algorithms/ilql/base_interface.py", "snippet": "def ilql_loss(\n q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v_final: jax.Array, # [batch]\n target_q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n target_q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q1_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n q2_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n token_ids: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n attention_mask: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n should_take_action: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n rewards: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n *, \n gamma: Union[float, jax.Array], \n tau: Union[float, jax.Array], \n cql_weight: Union[float, jax.Array], \n) -> Tuple[jnp.ndarray, Any]:\n # should be an action in the batch\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n \n q1sa_flat, q2sa_flat, v_flat = q1.reshape(-1), q2.reshape(-1), v.reshape(-1)\n target_q1sa_flat, target_q2sa_flat = target_q1.reshape(-1), target_q2.reshape(-1)\n vns_flat = jnp.concatenate((v, v_final[..., None]), axis=1).reshape(-1)\n\n qv_query_indicators = get_query_indicators(should_take_action.reshape(-1))\n\n is_next_state = should_take_action.copy()\n # set first action position to false\n is_next_state = is_next_state.at[jnp.arange(0, is_next_state.shape[0], dtype=jnp.int32), jnp.argmax(is_next_state.astype(jnp.int32), axis=1)].set(False)\n # set endpoint to true as long as there is at least 1 action in the sequence\n is_next_state = jnp.concatenate((is_next_state, (should_take_action.sum(axis=1) > 0)[..., None]), axis=1)\n\n vns_query_indicators = get_query_indicators(is_next_state.reshape(-1))\n # should be the same number of vns as qv, so we can clip the extra padding to match shape\n vns_query_indicators = vns_query_indicators[:qv_query_indicators.shape[0], :]\n \n # extract selected values\n q1sa_selected = (qv_query_indicators * q1sa_flat).sum(axis=1)\n q2sa_selected = (qv_query_indicators * q2sa_flat).sum(axis=1)\n v_selected = (qv_query_indicators * v_flat).sum(axis=1)\n target_q1sa_selected = (qv_query_indicators * target_q1sa_flat).sum(axis=1)\n target_q2sa_selected = (qv_query_indicators * target_q2sa_flat).sum(axis=1)\n vns_selected = (vns_query_indicators * vns_flat).sum(axis=1)\n rs_selected = (qv_query_indicators * rewards.reshape(-1)).sum(axis=1)\n\n # get masks for selected values\n sa_mask = (qv_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n ns_mask = (vns_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n\n # compute q loss\n q1_loss = (optax.l2_loss(q1sa_selected, jax.lax.stop_gradient(rs_selected + gamma * vns_selected)) * sa_mask).sum() / n\n q2_loss = (optax.l2_loss(q2sa_selected, jax.lax.stop_gradient(rs_selected + gamma * vns_selected)) * sa_mask).sum() / n\n\n # compute v loss\n target_q_selected = jnp.minimum(target_q1sa_selected, target_q2sa_selected)\n expectile_indicator = (target_q_selected >= v_selected).astype(jnp.float32)\n expectile_weights = expectile_indicator * tau + (1 - expectile_indicator) * (1 - tau)\n v_loss = (optax.l2_loss(v_selected, jax.lax.stop_gradient(target_q_selected)) * jax.lax.stop_gradient(expectile_weights) * 
sa_mask).sum() / n\n\n # compute cql loss on both q heads\n q1_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q1_logits, token_ids)\n q1_cql_loss = (mask * q1_cql_loss).sum() / n\n\n q2_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q2_logits, token_ids)\n q2_cql_loss = (mask * q2_cql_loss).sum() / n\n \n loss = q1_loss + q2_loss + v_loss + cql_weight * (q1_cql_loss + q2_cql_loss)\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n q1_loss=q1_loss, \n q2_loss=q2_loss, \n v_loss=v_loss, \n q1_cql_loss=q1_cql_loss, \n q2_cql_loss=q2_cql_loss, \n ), \n q1=get_tensor_stats(q1sa_selected, mask=sa_mask, n=n), \n q2=get_tensor_stats(q2sa_selected, mask=sa_mask, n=n), \n v=get_tensor_stats(v_selected, mask=sa_mask, n=n), \n target_q=get_tensor_stats(target_q_selected, mask=sa_mask, n=n), \n target_q1=get_tensor_stats(target_q1sa_selected, mask=sa_mask, n=n), \n target_q2=get_tensor_stats(target_q2sa_selected, mask=sa_mask, n=n), \n vns=get_tensor_stats(vns_selected, mask=ns_mask, n=n), \n v_final=get_tensor_stats(v_final, mask=jnp.ones(v_final.shape, dtype=jnp.int32), n=v_final.shape[0]), \n rewards=get_tensor_stats(rewards, mask=mask, n=n), \n )\n\n return loss, logs" }, { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\nclass TextTrajectory:\nclass TextTrajectoryChain:\nclass TextEnv(ABC):\nclass BatchedTextEnv(ABC):\nclass TextEnvToBatchedTextEnv(BatchedTextEnv):\nclass BatchedTextEnvToTextEnv(TextEnv):\nclass TextPolicy(ABC):\nclass BatchedTextPolicy(ABC):\nclass TextPolicyToBatchedTextPolicy(BatchedTextPolicy):\nclass BatchedTextPolicyToTextPolicy(TextPolicy):\nclass InteractionTransition(NamedTuple):\nclass UserPolicy(TextPolicy): \nclass TokenHistory:\nclass TokenTrajectory:\nclass TokenTrajectoryChain:\n def __post_init__(self):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def copy(self) -> TextEnv:\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def copy(self) -> BatchedTextEnv:\n def __init__(self, env: TextEnv):\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def __init__(self, env: BatchedTextEnv):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def act(self, text_history: TextHistory) -> TextHistory:\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: TextPolicy):\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: BatchedTextPolicy):\n def act(self, text_history: TextHistory) -> TextHistory:\ndef interact_environment(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n initial_text_history: 
Optional[Union[TextHistory, List[TextHistory]]]=None, \n env_seed: Union[Optional[int], Optional[List[Optional[int]]]]=None, \n env_options: Union[Optional[Dict], Optional[List[Optional[int]]]]=None, \n bsize: int=1, \n npad: int=0,\n) -> List[List[InteractionTransition]]:\ndef text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n def __init__(\n self, \n initial_str: str, \n postproc_print_f: Optional[Callable[[str], str]]=None, \n postproc_action_f: Optional[Callable[[str], str]]=None, \n ):\n def act(self, text_history: TextHistory) -> TextHistory:\n def __post_init__(self):\n def from_text_history(\n cls, \n text_history: TextHistory, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenHistory:\n def __post_init__(self):\n def from_text_trajectory(\n cls, \n text_trajectory: TextTrajectory, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectory:\n def __post_init__(self):\n def to_list(self) -> List[TokenTrajectory]:\n def from_text_trajectory_chain(\n cls, \n text_trajectory_chain: TextTrajectoryChain, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectoryChain:" }, { "identifier": "GPT2ILQLInference", "path": "LLM_RL/algorithms/ilql/gpt2/interface.py", "snippet": "class GPT2ILQLInference(ILQLInference):\n @classmethod\n def load_inference(\n cls, \n value_inference: GPT2ValueRLInference, \n target_value_inference: GPT2ValueRLInference, \n loss_fn: Callable, \n use_target_base_for_loss: bool=True, \n ):\n mesh = value_inference.base_model.config.mesh\n assert mesh is not None\n assert mesh == value_inference.q_head_model.config.mesh\n assert mesh == value_inference.v_head_model.config.mesh\n assert mesh == target_value_inference.base_model.config.mesh\n assert mesh == target_value_inference.q_head_model.config.mesh\n\n base_params_partition_spec = match_partition_rules(value_inference.base_model.config.get_partition_rules(), value_inference.base_params)\n target_base_params_partition_spec = PS() if (not use_target_base_for_loss) else match_partition_rules(target_value_inference.base_model.config.get_partition_rules(), target_value_inference.base_params)\n q1_head_params_partition_spec = match_partition_rules(value_inference.q_head_model.config.get_partition_rules(), value_inference.q1_head_params)\n q2_head_params_partition_spec = match_partition_rules(value_inference.q_head_model.config.get_partition_rules(), value_inference.q2_head_params)\n v_head_params_partition_spec = match_partition_rules(value_inference.v_head_model.config.get_partition_rules(), value_inference.v_head_params)\n q1_target_head_params_partition_spec = match_partition_rules(target_value_inference.q_head_model.config.get_partition_rules(), target_value_inference.q1_head_params)\n q2_target_head_params_partition_spec = match_partition_rules(target_value_inference.q_head_model.config.get_partition_rules(), 
target_value_inference.q2_head_params)\n \n @partial(\n pjit, \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _eval_loss(\n base_params: PyTree, \n target_base_params: Optional[PyTree], \n q1_head_params: PyTree, \n q2_head_params: PyTree, \n v_head_params: PyTree, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n\n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n rewards: jax.Array, \n dones: jax.Array, \n\n next_token_ids: Optional[jax.Array], \n next_tokens_attention_mask: Optional[jax.Array], \n next_tokens_position_ids: Optional[jax.Array], \n next_dones: Optional[jax.Array], \n\n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n train: bool=False, \n ) -> Tuple[jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n rewards = with_named_sharding_constraint(rewards, mesh, PS(('dp', 'fsdp'), None))\n dones = with_named_sharding_constraint(dones, mesh, PS(('dp', 'fsdp')))\n if next_token_ids is not None:\n assert next_tokens_attention_mask is not None\n assert next_tokens_position_ids is not None\n next_token_ids = with_named_sharding_constraint(next_token_ids, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_attention_mask = with_named_sharding_constraint(next_tokens_attention_mask, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_position_ids = with_named_sharding_constraint(next_tokens_position_ids, mesh, PS(('dp', 'fsdp'), None))\n next_dones = with_named_sharding_constraint(next_dones, mesh, PS(('dp', 'fsdp')))\n else:\n assert next_tokens_attention_mask is None\n assert next_tokens_position_ids is None\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = value_inference.base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n if target_base_params is not None:\n new_key = None\n if prng_key is not None:\n 
prng_key, new_key = jax.random.split(prng_key)\n target_base_model_output = target_value_inference.base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=target_base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n else:\n target_base_model_output = base_model_output\n \n if next_token_ids is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_base_model_output = value_inference.base_model(\n input_ids=next_token_ids, \n attention_mask=next_tokens_attention_mask, \n position_ids=next_tokens_position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1_head_output = value_inference.q_head_model.apply(\n {'params': q1_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2_head_output = value_inference.q_head_model.apply(\n {'params': q2_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v_head_output = value_inference.v_head_model.apply(\n {'params': v_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q1_head_output = target_value_inference.q_head_model.apply(\n {'params': q1_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q2_head_output = target_value_inference.q_head_model.apply(\n {'params': q2_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # process outputs\n\n q1 = jnp.take_along_axis(q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q2 = jnp.take_along_axis(q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n v = v_head_output[:, :-1].squeeze(2)\n v_full = v_head_output.squeeze(2)\n target_q1 = jnp.take_along_axis(target_q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n target_q2 = jnp.take_along_axis(target_q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n\n q1_logits = q1_head_output[:, :-1, :].astype(jnp.float32)\n q2_logits = q2_head_output[:, :-1, :].astype(jnp.float32)\n\n # get next token values\n\n if next_token_ids is not None:\n # just run vf on last token to save some flops\n last_next_token_idxs = (next_tokens_attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(next_tokens_attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_next_token_h = next_token_base_model_output.hidden_states[-1][jnp.arange(0, input_ids.shape[0], dtype=jnp.int32), last_next_token_idxs, :]\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_v_head_output = 
value_inference.v_head_model.apply(\n {'params': v_head_params}, \n final_next_token_h, \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(1)\n v_final = next_token_v_head_output * (1 - next_dones.astype(jnp.float32))\n else:\n last_action_idxs = (should_take_action.shape[1]-1)-jnp.argmax(jnp.flip(should_take_action, axis=1).astype(jnp.int32), axis=1)+1\n last_token_idxs = (attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_state_idxs = ((1 - dones) * last_action_idxs + dones * last_token_idxs).astype(jnp.int32)\n v_final = v_full[jnp.arange(0, should_take_action.shape[0], dtype=jnp.int32), final_state_idxs]\n v_final = v_final * (1 - dones)\n\n loss, info = loss_fn(\n q1, \n q2, \n v, \n v_final, \n target_q1, \n target_q2, \n q1_logits, \n q2_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n rewards, \n )\n \n return loss, info\n\n return cls(\n value_inference=value_inference, \n target_value_inference=target_value_inference, \n _eval_loss=_eval_loss, \n use_target_base_for_loss=use_target_base_for_loss, \n )" }, { "identifier": "GPT2ILQLTrain", "path": "LLM_RL/algorithms/ilql/gpt2/interface.py", "snippet": "class GPT2ILQLTrain(ILQLTrain):\n @classmethod\n def load_train(\n cls, \n base_train_state: TrainState, \n target_base_params: Optional[PyTree], \n q1_head_train_state: TrainState, \n q2_head_train_state: TrainState, \n v_head_train_state: TrainState, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n v_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n detach_q1: bool, \n detach_q2: bool, \n detach_v: bool, \n polyak_alpha: float, \n hard_update_every: Optional[int], \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n assert mesh == v_head_model.config.mesh\n base_train_state_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_train_state)\n target_base_params_partition_spec = PS() if target_base_params is None else match_partition_rules(base_model.config.get_partition_rules(), target_base_params)\n q1_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_head_train_state)\n q2_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q2_head_train_state)\n v_head_train_state_partition_spec = match_partition_rules(v_head_model.config.get_partition_rules(), v_head_train_state)\n q1_target_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_target_head_params)\n q2_target_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q2_target_head_params)\n\n @partial(\n pjit, \n donate_argnums=(0, 1, 2, 3, 4, 5, 6), \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: 
NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _step(\n base_train_state: TrainState, \n target_base_params: Optional[PyTree], \n q1_head_train_state: TrainState, \n q2_head_train_state: TrainState, \n v_head_train_state: TrainState, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n\n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n rewards: jax.Array, \n dones: jax.Array, \n\n next_token_ids: Optional[jax.Array], \n next_tokens_attention_mask: Optional[jax.Array], \n next_tokens_position_ids: Optional[jax.Array], \n next_dones: Optional[jax.Array], \n\n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ) -> Tuple[TrainState, Optional[PyTree], TrainState, TrainState, TrainState, PyTree, PyTree, jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n rewards = with_named_sharding_constraint(rewards, mesh, PS(('dp', 'fsdp'), None))\n dones = with_named_sharding_constraint(dones, mesh, PS(('dp', 'fsdp')))\n if next_token_ids is not None:\n assert next_tokens_attention_mask is not None\n assert next_tokens_position_ids is not None\n next_token_ids = with_named_sharding_constraint(next_token_ids, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_attention_mask = with_named_sharding_constraint(next_tokens_attention_mask, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_position_ids = with_named_sharding_constraint(next_tokens_position_ids, mesh, PS(('dp', 'fsdp'), None))\n next_dones = with_named_sharding_constraint(next_dones, mesh, PS(('dp', 'fsdp')))\n else:\n assert next_tokens_attention_mask is None\n assert next_tokens_position_ids is None\n\n # define loss function\n\n def grad_loss(base_params: PyTree, q1_head_params: PyTree, q2_head_params: PyTree, v_head_params: PyTree, prng_key: jax.random.PRNGKeyArray):\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, 
new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n if target_base_params is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=target_base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n else:\n target_base_model_output = base_model_output\n \n if next_token_ids is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_base_model_output = base_model(\n input_ids=next_token_ids, \n attention_mask=next_tokens_attention_mask, \n position_ids=next_tokens_position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1_head_output = q_head_model.apply(\n {'params': q1_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2_head_output = q_head_model.apply(\n {'params': q2_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v_head_output = v_head_model.apply(\n {'params': v_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q1_head_output = q_head_model.apply(\n {'params': q1_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q2_head_output = q_head_model.apply(\n {'params': q2_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # stop gradients\n if detach_q1:\n q1_head_output = jax.lax.stop_gradient(q1_head_output)\n if detach_q2:\n q2_head_output = jax.lax.stop_gradient(q2_head_output)\n if detach_v:\n v_head_output = jax.lax.stop_gradient(v_head_output)\n target_q1_head_output = jax.lax.stop_gradient(target_q1_head_output)\n target_q2_head_output = jax.lax.stop_gradient(target_q2_head_output)\n\n q1 = jnp.take_along_axis(q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q2 = jnp.take_along_axis(q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n v = v_head_output[:, :-1].squeeze(2)\n v_full = v_head_output.squeeze(2)\n target_q1 = jnp.take_along_axis(target_q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n target_q2 = jnp.take_along_axis(target_q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n\n q1_logits = q1_head_output[:, :-1, :].astype(jnp.float32)\n q2_logits = q2_head_output[:, :-1, 
:].astype(jnp.float32)\n\n # get next token values\n\n if next_token_ids is not None:\n # just run vf on last token to save some flops\n last_next_token_idxs = (next_tokens_attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(next_tokens_attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_next_token_h = next_token_base_model_output.hidden_states[-1][jnp.arange(0, input_ids.shape[0], dtype=jnp.int32), last_next_token_idxs, :]\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_v_head_output = v_head_model.apply(\n {'params': v_head_params}, \n final_next_token_h, \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(1)\n v_final = next_token_v_head_output * (1 - next_dones.astype(jnp.float32))\n else:\n last_action_idxs = (should_take_action.shape[1]-1)-jnp.argmax(jnp.flip(should_take_action, axis=1).astype(jnp.int32), axis=1)+1\n last_token_idxs = (attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_state_idxs = ((1 - dones) * last_action_idxs + dones * last_token_idxs).astype(jnp.int32)\n v_final = v_full[jnp.arange(0, should_take_action.shape[0], dtype=jnp.int32), final_state_idxs]\n v_final = v_final * (1 - dones)\n v_final = jax.lax.stop_gradient(v_final)\n\n loss, info = loss_fn(\n q1, \n q2, \n v, \n v_final, \n target_q1, \n target_q2, \n q1_logits, \n q2_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n rewards, \n )\n return loss, info\n\n # take loss\n (loss, info), (base_grads, q1_head_grads, q2_head_grads, v_head_grads) = jax.value_and_grad(grad_loss, has_aux=True, argnums=(0, 1, 2, 3))(\n base_train_state.params, \n q1_head_train_state.params, \n q2_head_train_state.params, \n v_head_train_state.params, \n prng_key, \n )\n # assert shard gradients\n base_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n base_grads, \n base_train_state_partition_spec.params, \n )\n q1_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q1_head_grads, \n q1_head_train_state_partition_spec.params, \n )\n q2_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q2_head_grads, \n q2_head_train_state_partition_spec.params, \n )\n v_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n v_head_grads, \n v_head_train_state_partition_spec.params, \n )\n # update params and optim state\n base_train_state = base_train_state.apply_gradients(grads=base_grads)\n q1_head_train_state = q1_head_train_state.apply_gradients(grads=q1_head_grads)\n q2_head_train_state = q2_head_train_state.apply_gradients(grads=q2_head_grads)\n v_head_train_state = v_head_train_state.apply_gradients(grads=v_head_grads)\n\n # handle target network updates\n def update_targets(params: PyTree, base_params: PyTree, steps: jnp.ndarray) -> PyTree:\n base_params = optax.incremental_update(params, base_params, polyak_alpha)\n if hard_update_every is not None:\n base_params = optax.periodic_update(params, base_params, steps, hard_update_every)\n return base_params\n \n def mid_targets(params: PyTree, base_params: PyTree, steps: jnp.ndarray) -> PyTree:\n return base_params\n\n def update_cond(opt_state: PyTree) -> bool:\n if hasattr(opt_state, 'mini_step'):\n return opt_state.mini_step == 0\n return True\n \n if target_base_params is not None:\n target_base_params = 
jax.lax.cond(\n update_cond(base_train_state.opt_state), \n update_targets, \n mid_targets, \n base_train_state.params, \n target_base_params, \n base_train_state.step, \n )\n q1_target_head_params = jax.lax.cond(\n update_cond(q1_head_train_state.opt_state), \n update_targets, \n mid_targets, \n q1_head_train_state.params, \n q1_target_head_params, \n q1_head_train_state.step, \n )\n q2_target_head_params = jax.lax.cond(\n update_cond(q2_head_train_state.opt_state), \n update_targets, \n mid_targets, \n q2_head_train_state.params, \n q2_target_head_params, \n q2_head_train_state.step, \n )\n\n return base_train_state, target_base_params, q1_head_train_state, q2_head_train_state, v_head_train_state, q1_target_head_params, q2_target_head_params, loss, info\n\n return cls(\n base_train_state=base_train_state, \n target_base_params=target_base_params, \n q1_head_train_state=q1_head_train_state, \n q2_head_train_state=q2_head_train_state, \n v_head_train_state=v_head_train_state, \n q1_target_head_params=q1_target_head_params, \n q2_target_head_params=q2_target_head_params, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=v_head_model, \n tokenizer=tokenizer, \n _step=_step, \n )" }, { "identifier": "GPT2ValuePolicy", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValuePolicy(ValueRLPolicy):\n def __init__(\n self, \n inference: ValueRLInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n 
raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n pi_beta_params, base_params, \\\n q1_head_params, q2_head_params = policy_params\n self.inference = self.inference.replace(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n )" }, { "identifier": "GPT2ValueRLInference", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValueRLInference(ValueRLInference):\n @classmethod\n def load_inference(\n cls, \n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n pi_beta_model: Optional[FlaxPreTrainedModel], \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n v_head_model: Optional[nn.Module], \n tokenizer: PreTrainedTokenizerBase, \n beta: float=0.0, \n dp_shard_logits: bool=True, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n if v_head_model is not None:\n assert mesh == v_head_model.config.mesh\n assert (pi_beta_model is None and pi_beta_params is None) or (pi_beta_model is not None and pi_beta_params is not None)\n \n pi_beta_params_partition_spec = PS() if pi_beta_params is None else match_partition_rules(pi_beta_model.config.get_partition_rules(), pi_beta_params)\n base_params_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_params)\n q1_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_head_params)\n q2_head_params_partition_spec = PS() if q2_head_params is None else match_partition_rules(q_head_model.config.get_partition_rules(), q2_head_params)\n v_head_params_partition_spec = PS() if v_head_params is None else match_partition_rules(v_head_model.config.get_partition_rules(), v_head_params)\n\n generator = None\n if pi_beta_model is not None:\n generator = GPT2ValueRLGeneration(\n base_model_config=base_model.config, \n pi_beta=pi_beta_model, \n value_base=base_model, \n q_head=q_head_model, \n beta=beta, \n )\n\n if pi_beta_params is not None:\n @partial(\n pjit, \n static_argnames=('generation_config', 'trace'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), pi_beta_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=NamedSharding(mesh, PS()), \n )\n def _generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: 
bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n # NOTE: position_ids ignored by transformers\n\n # generate from model\n output = generator.generate(\n input_ids=input_ids, \n attention_mask=attention_mask, \n params=(pi_beta_params, base_params, q1_head_params, q2_head_params), \n prng_key=prng_key, \n generation_config=StreamingGenerationConfig.from_dict(generation_config) if generation_config is not None else None, \n trace=trace, \n )\n \n return output\n else:\n def _generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n raise NotImplementedError\n \n @partial(\n pjit, \n static_argnames=('output_attentions', 'train'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=ValueRLForwardOutput(\n base_raw_output=FlaxCausalLMOutputWithCrossAttentions(\n logits=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n hidden_states=NamedSharding(mesh, PS()), # assume no sharding for hidden states\n attentions=NamedSharding(mesh, PS()), # assume no sharding for attentions\n cross_attentions=NamedSharding(mesh, PS()), # assume no sharding for cross attentions\n past_key_values=NamedSharding(mesh, PS()), # assume no sharding for past key values\n ), \n q1=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n q2=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if (dp_shard_logits and q2_head_params is not None) else NamedSharding(mesh, PS()), \n v=NamedSharding(mesh, PS()), \n ), \n )\n def _forward(\n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n output_attentions: Optional[bool]=None, \n train: bool=False, \n ) -> ValueRLForwardOutput:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n\n # get logits\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_output = 
base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n train=train, \n output_attentions=output_attentions, \n output_hidden_states=True, \n dropout_rng=new_key, \n )\n # trunc padded logits\n base_output = base_output.replace(logits=base_output.logits.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf')))\n\n # get q1\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1 = q_head_model.apply(\n {'params': q1_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q1 = q1.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n\n # get q2\n if q2_head_params is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2 = q_head_model.apply(\n {'params': q2_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q2 = q2.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n else:\n q2 = None\n\n if v_head_params is not None:\n # get v\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v = v_head_model.apply(\n {'params': v_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(2)\n else:\n v = None\n\n # assert sharding on outputs\n if dp_shard_logits:\n base_output = base_output.replace(logits=with_named_sharding_constraint(base_output.logits, mesh, PS((\"dp\", \"fsdp\"), None, None)))\n q1 = with_named_sharding_constraint(q1, mesh, PS((\"dp\", \"fsdp\"), None, None))\n if q2 is not None:\n q2 = with_named_sharding_constraint(q2, mesh, PS((\"dp\", \"fsdp\"), None, None))\n return ValueRLForwardOutput(\n base_raw_output=base_output, \n q1=q1, \n q2=q2, \n v=v, \n )\n\n return cls(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n v_head_params=v_head_params, \n pi_beta_model=pi_beta_model, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=v_head_model, \n tokenizer=tokenizer, \n _generate=_generate, \n _forward=_forward,\n )" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/mlp_head.py", "snippet": "def load_train_state_from_config(\n model_config: MLPHeadConfig, \n model_dtype: Union[str, jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, MLPHead]:\n \n model = MLPHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "MLPHeadConfig", "path": "LLM_RL/heads/mlp_head.py", "snippet": "class MLPHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n hidden_dim: int, \n output_dim: int, \n 
use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n layer1_initializer_range: Optional[int]=None, \n layer1_bias_init: Optional[float]=None, \n layer2_initializer_range: Optional[int]=None, \n layer2_bias_init: Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.layer1_initializer_range = layer1_initializer_range\n self.layer1_bias_init = layer1_bias_init\n self.layer2_initializer_range = layer2_initializer_range\n self.layer2_bias_init = layer2_bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense1']['kernel']\"), PS(\"fsdp\", \"mp\")), \n (re.escape(\"['dense1']['bias']\"), PS(\"mp\")), \n (re.escape(\"['dense2']['kernel']\"), PS(\"mp\", \"fsdp\")), \n (re.escape(\"['dense2']['bias']\"), PS()), \n ]\n\n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = MLPHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "ILQLIterableDataset", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLIterableDataset(IterableDataset):\n def __init__(self, ilql_data: Iterable[Dict[str, np.ndarray]]):\n self.ilql_data = ilql_data\n \n def __iter__(self):\n return _ILQLIteratorDataset(iter(self.ilql_data))\n \n @classmethod\n def from_ilql_data_iterable(\n cls, \n ilql_data: Iterable[ILQLData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> ILQLIterableDataset:\n \n class _TokensIterable(Iterable):\n def _tokens_generator(self):\n for item in ilql_data:\n yield jax.tree_util.tree_map(lambda x: x[0], ILQLData.block([item], blocking_strategy, tokenizer))\n\n def __iter__(self):\n return self._tokens_generator()\n\n return cls(_TokensIterable())" }, { "identifier": "train_loop", "path": "LLM_RL/algorithms/ilql/train.py", "snippet": "def train_loop(\n trainer: ILQLTrain, \n inference: Union[ValueRLInference, ILQLInference], \n evaluator: Optional[Callable[[Inference], Tuple[float, Dict[str, Any]]]], \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: KeyArray, \n save_dir: Optional[str], \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n **loop_state: Dict[Hashable, Any], \n) -> Tuple[Train, Inference]:\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, 
\n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n epoch = -1\n\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n base_model=trainer.base_model, \n q_head_model=trainer.q_head_model, \n v_head_model=trainer.v_head_model, \n base_train_state=trainer.base_train_state, \n target_base_params=trainer.target_base_params, \n q1_head_train_state=trainer.q1_head_train_state, \n q2_head_train_state=trainer.q2_head_train_state, \n v_head_train_state=trainer.v_head_train_state, \n q1_target_head_params=trainer.q1_target_head_params, \n q2_target_head_params=trainer.q2_target_head_params, \n save_dir=curr_save_dir, \n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _inference_update():\n nonlocal inference\n if isinstance(inference, ValueRLInference):\n inference = inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q1_head_train_state.params, \n q2_head_params=trainer.q2_head_train_state.params, \n v_head_params=trainer.v_head_train_state.params, \n )\n elif isinstance(inference, ILQLInference):\n new_value_inference = inference.value_inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q1_head_train_state.params, \n q2_head_params=trainer.q2_head_train_state.params, \n v_head_params=trainer.v_head_train_state.params, \n )\n new_target_value_inference = inference.target_value_inference.replace(\n base_params=trainer.target_base_params, \n q1_head_params=trainer.q1_target_head_params, \n q2_head_params=trainer.q2_target_head_params, \n )\n inference = inference.replace(\n value_inference=new_value_inference, \n target_value_inference=new_target_value_inference, \n )\n else:\n raise NotImplementedError\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n # get eval logs\n _inference_update()\n eval_perf, eval_logs = evaluator(inference)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': epoch}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n \n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n 
steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # begin training loop\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for batch in tqdm(d, total=steps_per_epoch):\n \n # step model and get training logs\n prng_key, new_prng = jax.random.split(prng_key)\n if 'step' in loop_state and step < loop_state['step']:\n step += 1\n continue\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name=f'step_{step+1}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n step += 1\n\n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n _inference_update()\n return trainer, inference" }, { "identifier": "eval_loss", "path": "LLM_RL/algorithms/ilql/train.py", "snippet": "def eval_loss(\n inference: ILQLInference, \n dataset: Union[Seq2SeqDataset, 
Seq2SeqIterableDataset], \n prng_key: Optional[KeyArray], \n bsize: int, \n eval_batches: Optional[int], \n) -> Dict[str, Any]:\n # setup evaluator loop state\n eval_logs = []\n\n # eval on batches\n prng_key, new_prng = jax.random.split(prng_key) if prng_key is not None else (None, None)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for i, batch in tqdm(enumerate(d)):\n # conditionally terminate early\n if eval_batches is not None and i >= eval_batches:\n break\n\n # get eval logs\n _, info = inference.eval_loss(**batch)\n eval_logs.append(info)\n \n # gather and postproc eval logs\n eval_logs = pull_logs(combine_logs(eval_logs))\n return eval_logs" }, { "identifier": "ILQLData", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n rewards: np.ndarray # [t-1]\n done: np.ndarray # []\n next_token_ids: Optional[np.ndarray] # [t']\n next_done: Optional[np.ndarray] # []\n\n @staticmethod\n def block(\n data: List[ILQLData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n has_next_token = any(map(lambda x: x.next_token_ids is not None, data))\n assert all(map(lambda x: x.next_token_ids is None, data)) or has_next_token\n assert all(map(lambda x: x.next_done is None, data)) or has_next_token\n\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n rewards=block_sequences(\n list(map(lambda x: x.rewards, data)), \n 0.0, \n dtype=np.float32, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n dones=np.asarray(list(map(lambda x: x.done, data)), dtype=np.bool_), \n next_token_ids=block_sequences(\n list(map(lambda x: x.next_token_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ) if has_next_token else None, \n next_dones=np.asarray(list(map(lambda x: x.next_done, data)), dtype=np.bool_) if has_next_token else None, \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n ):\n if token_trajectory_chain.next is not None:\n if token_trajectory_chain.next.token_trajectory.is_action[1:].sum() > 0:\n first_next_action = np.argmax(token_trajectory_chain.next.token_trajectory.is_action[1:], axis=0)+1\n next_token_ids = token_trajectory_chain.next.token_trajectory.tokens[:first_next_action]\n next_done = False\n else:\n next_token_ids = token_trajectory_chain.next.token_trajectory.tokens\n next_done = token_trajectory_chain.next.token_trajectory.done\n else:\n next_token_ids, next_done = None, None\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=token_trajectory_chain.token_trajectory.is_action[1:], \n rewards=token_trajectory_chain.token_trajectory.reward[1:], \n done=token_trajectory_chain.token_trajectory.done, \n next_token_ids=next_token_ids, \n next_done=next_done, \n )" }, { "identifier": "ILQLIterableDataset", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLIterableDataset(IterableDataset):\n def __init__(self, ilql_data: Iterable[Dict[str, np.ndarray]]):\n 
self.ilql_data = ilql_data\n \n def __iter__(self):\n return _ILQLIteratorDataset(iter(self.ilql_data))\n \n @classmethod\n def from_ilql_data_iterable(\n cls, \n ilql_data: Iterable[ILQLData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> ILQLIterableDataset:\n \n class _TokensIterable(Iterable):\n def _tokens_generator(self):\n for item in ilql_data:\n yield jax.tree_util.tree_map(lambda x: x[0], ILQLData.block([item], blocking_strategy, tokenizer))\n\n def __iter__(self):\n return self._tokens_generator()\n\n return cls(_TokensIterable())" }, { "identifier": "TextNavEnv", "path": "llm_rl_scripts/text_nav/env/env.py", "snippet": "class TextNavEnv(TextEnv):\n \"\"\"\n Environment for textual navigation game.\n \"\"\"\n\n def __init__(self,\n display_location: bool = False,\n display_inventory: bool = False):\n \n self.infos = EnvInfos(description=True,\n admissible_commands=True,\n location=display_location,\n inventory=display_inventory)\n self.reset()\n\n def _reset(self, seed: Optional[int] = None):\n _, self.game_file = build_and_compile_game(not self.infos.location)\n self.env = textworld.start(self.game_file, self.infos)\n self.display_command_during_render = True\n\n self.state = self.env.reset()\n\n redundant = [\"examine\", \"look\", \"inventory\"]\n self.state[\"admissible_commands\"] = list(\n c for c in self.state[\"admissible_commands\"] if not any(a in c for a in redundant))\n self.state.feedback += \"\\nAdmissible commands: {}\\n\".format(\n \", \".join(self.state[\"admissible_commands\"]))\n \n self.state.feedback = re.sub(\"-=.*=-\\n\", \"\", self.state.feedback)\n\n def _step(self, command: str):\n command = command.strip()\n self.state, _, _ = self.env.step(command)\n\n if self.infos.inventory:\n inventory, _, _ = self.env.step(\"inventory\")\n self.state[\"inventory\"] = inventory.feedback.strip()\n self.state.feedback += \"\\n{}\\n\".format(self.state[\"inventory\"])\n\n redundant = [\"examine\", \"look\", \"inventory\"]\n self.state[\"admissible_commands\"] = list(\n c for c in self.state[\"admissible_commands\"] if not any(a in c for a in redundant))\n self.state.feedback += \"\\nAdmissible commands: {}\\n\".format(\n \", \".join(self.state[\"admissible_commands\"]))\n \n self.state.feedback = re.sub(\"-=.*=-\\n\", \"\", self.state.feedback)\n \n def reset(self) -> TextHistory:\n self._reset()\n return tuple(self.state.feedback,)\n\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n assert text_history[-1].is_action\n\n command = text_history[-1].text\n self._step(command)\n return (\n text_history + (Text(command, true), Text(self.state.feedback, False)),\n self.state[\"score\"],\n self.state[\"done\"] \n )\n\n def render(self) -> str:\n msg = self.state.feedback.rstrip() + \"\\n\"\n if self.display_command_during_render and self.state.last_command is not None:\n msg = '> ' + self.state.last_command + \"\\n\" + msg\n\n # Wrap each paragraph.\n paragraphs = msg.split(\"\\n\")\n paragraphs = [\"\\n\".join(textwrap.wrap(paragraph, width=80)) for paragraph in paragraphs]\n msg = \"\\n\".join(paragraphs)\n\n sys.stdout.write(msg + \"\\n\")" } ]
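The context entries above cover the whole data path from a raw text trajectory to a blocked training example. A minimal sketch of that path, wiring together the classes shown in the snippets; the toy trajectory, the plain 'gpt2' tokenizer, and max_length=64 are illustrative assumptions, not values from the repository:

# Sketch only: build one ILQL example from a toy trajectory using the classes above.
from transformers import AutoTokenizer
from JaxSeq.utils import BlockingStrategy, Padding, Truncation
from LLM_RL.environment import Text, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain
from LLM_RL.algorithms.ilql.data import ILQLData, ILQLIterableDataset

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # assumed stand-in tokenizer
tokenizer.add_special_tokens({'pad_token': '<|pad|>'})

chain = TextTrajectoryChain(
    text_trajectory=TextTrajectory(
        # observation, action, observation; actions sit at the odd indices
        text_history=[Text("You are in a kitchen.\n", False),
                      Text("go north\n", True),
                      Text("You reach the corridor.\n", False)],
        reward=[0.0, 1.0, 0.0],
        done=True,
    ),
    next=None,
)
item = ILQLData.from_token_trajectory_chain(
    TokenTrajectoryChain.from_text_trajectory_chain(chain, tokenizer)
)
dataset = ILQLIterableDataset.from_ilql_data_iterable(
    [item], tokenizer,
    BlockingStrategy(padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=64),
)
example = next(iter(dataset))  # padded numpy arrays: input_ids, should_take_action, rewards, dones, ...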
from typing import Optional
from JaxSeq.bucket_manager import open_with_bucket as open
from transformers import AutoTokenizer
from JaxSeq.utils import jsonl_stream, convert_path, load_mesh, setup_experiment_save
from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, MapIterable, FileOpenIterable
from JaxSeq.models.gptj.load import load_train_state, ModelLoadMode
from LLM_RL.algorithms.ilql.base_interface import ilql_loss
from transformers.generation import GenerationConfig
from jaxtyping import PyTree
from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain, text_history_to_str
from LLM_RL.algorithms.ilql.gpt2.interface import GPT2ILQLInference, GPT2ILQLTrain
from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference
from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config
from LLM_RL.heads.mlp_head import MLPHeadConfig
from JaxSeq.shard_model import shard_params_from_params
from functools import partial
from JaxSeq.logs import log, pull_logs
from LLM_RL.algorithms.ilql.train import train_loop, eval_loss
from LLM_RL.algorithms.ilql.data import ILQLData, ILQLIterableDataset
from JaxSeq.utils import multihost_device_get
from llm_rl_scripts.text_nav.env import TextNavEnv
import tyro
import jax
import jax.numpy as jnp
import os
import optax
import pickle as pkl
import re
import numpy as np
import json
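tyro is imported above but the entry point falls outside this excerpt; a hypothetical launch stub, assuming the usual tyro pattern (the __main__ guard is not shown in the source):

# Hypothetical entry point: tyro builds a CLI from the type-annotated signature
# of the main(...) function defined further down in this file.
if __name__ == "__main__":
    tyro.cli(main)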
21481
input_args = locals() print(input_args) tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def expand(lst, default_value): new_lst = [default_value] for elem in lst: new_lst.append(elem) new_lst.append(default_value) return new_lst def map_data_item(item): text_trajectory_chain = TextTrajectoryChain( text_trajectory=TextTrajectory( # Actions are at odd indices text_history=[Text(text, i % 2 != 0) for i, text in enumerate(item['text_history'])], reward=expand(item['rewards'], 0.0), done=item['dones'][-1], ), next=None, ) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(text_trajectory_chain, tokenizer) return ILQLData.from_token_trajectory_chain(token_trajectory_chain) train_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(train_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) eval_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(eval_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): target_base_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) target_base_params = shard_params_from_params( model=base_model, params=target_base_params, ) with jax.default_device(jax.devices('cpu')[0]): pi_beta_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) pi_beta_params = shard_params_from_params( model=base_model, params=pi_beta_params, ) q1_prng_key = jax.random.PRNGKey(4) q1_head_train_state, q_head = load_head_train_state_from_config(
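The two `with jax.default_device(...)` blocks above repeat the same copy-then-reshard steps for the target network and the frozen pi_beta parameters; a small helper, sketched here and not part of the repository, makes that pattern explicit:

# Sketch only: factor out the "pull to host, copy, reshard" pattern used above
# for target_base_params and pi_beta_params.
def copy_and_reshard(params, model, mesh):
    with jax.default_device(jax.devices('cpu')[0]):
        host_copy = jax.tree_util.tree_map(
            lambda x: multihost_device_get(x, mesh=mesh).copy(),
            params,
        )
    return shard_params_from_params(model=model, params=host_copy)

# equivalent to the two blocks above:
# target_base_params = copy_and_reshard(base_train_state.params, base_model, mesh)
# pi_beta_params = copy_and_reshard(base_train_state.params, base_model, mesh)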
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, eval_data_path: str, vocab_file: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=False, wandb_project: Optional[str]=None, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, train_bsize: int=32, grad_accum_steps: int=1, bf16_activations: bool=False, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=512, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_at_beginning: bool=False, save_at_end: bool=False, save_best: bool=True, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, policy_bsize: int=32, policy_n_rollouts: int=32, eval_loss_bsize: int=32, eval_loss_batches: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, beta: float=16.0, detach_q1: bool=False, detach_q2: bool=False, detach_v: bool=False, polyak_alpha: float=0.005, hard_update_every: Optional[int]=None, gamma: float=0.99, tau: float=0.8, cql_weight: float=0.00, ): input_args = locals() print(input_args) tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def expand(lst, default_value): new_lst = [default_value] for elem in lst: new_lst.append(elem) new_lst.append(default_value) return new_lst def map_data_item(item): text_trajectory_chain = TextTrajectoryChain( text_trajectory=TextTrajectory( # Actions are at odd indices text_history=[Text(text, i % 2 != 0) for i, text in enumerate(item['text_history'])], reward=expand(item['rewards'], 0.0), done=item['dones'][-1], ), next=None, ) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(text_trajectory_chain, tokenizer) return ILQLData.from_token_trajectory_chain(token_trajectory_chain) train_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(train_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) eval_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(eval_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, 
mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): target_base_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) target_base_params = shard_params_from_params( model=base_model, params=target_base_params, ) with jax.default_device(jax.devices('cpu')[0]): pi_beta_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) pi_beta_params = shard_params_from_params( model=base_model, params=pi_beta_params, ) q1_prng_key = jax.random.PRNGKey(4) q1_head_train_state, q_head = load_head_train_state_from_config(
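The code above stops mid-call, and the recorded continuation below is only the single line `model_config=MLPHeadConfig(`. Purely as an illustration of how the Q-head config could be filled in with the MLPHeadConfig constructor shown in the first context entry; the variable name and every value here are assumptions, not the repository's:

# Hypothetical config object for the call above; dimensions and values are assumed.
q_head_config = MLPHeadConfig(
    input_dim=base_model.config.hidden_size,       # assumed: head reads GPT-J hidden states
    hidden_dim=2 * base_model.config.hidden_size,  # assumed widening factor
    output_dim=base_model.config.vocab_size,       # assumed: one Q-value per vocabulary token
    use_bias=True,
    mesh=mesh,
)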
model_config=MLPHeadConfig(
7
2023-11-21 00:16:42+00:00
24k
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means to the position of query vecotr and `j` means the\r\n # position of key vector. 
We use position relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reserve the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct an LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-leyered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transforner block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimenstion.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat time.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positioning encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"直接相加 DirectAddFuse\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"多特征融合 AFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported.'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"多特征融合 iAFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
import logging
import torch
import math
from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
15,326
self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = (attention_dim, cnn_module_kernel, activation) self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( attention_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), ffn_layer(*ffn_layer_args), ffn_layer(*ffn_layer_args) if macaron_style else None, convolution_layer(*convolution_layer_args) if use_cnn_module else None, dropout_rate, normalize_before, concat_after, stochastic_depth_rate * float(1 + lnum) / num_blocks, ), ) if self.normalize_before: self.after_norm = LayerNorm(attention_dim) self.intermediate_layers = intermediate_layers self.use_conditioning = True if ctc_softmax is not None else False if self.use_conditioning: self.ctc_softmax = ctc_softmax self.conditioning_layer = torch.nn.Linear( conditioning_layer_dim, attention_dim ) self.enable_fusion = enable_fusion self.fusion_type = fusion_type if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): raise NotImplementedError if self.fusion_type == 'daf_1d': self.fusion_model = DAF() elif self.fusion_type == 'aff_1d': self.fusion_model = AFF(channels=attention_dim, type='1D') elif self.fusion_type == 'iaff_1d': self.fusion_model = iAFF(channels=attention_dim, type='1D') elif (self.enable_fusion) and (self.fusion_type in ['attnpool_1d']):
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) 
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = 
MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = (attention_dim, cnn_module_kernel, activation) self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( attention_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), ffn_layer(*ffn_layer_args), ffn_layer(*ffn_layer_args) if macaron_style else None, convolution_layer(*convolution_layer_args) if use_cnn_module else None, dropout_rate, normalize_before, concat_after, stochastic_depth_rate * float(1 + lnum) / num_blocks, ), ) if self.normalize_before: self.after_norm = LayerNorm(attention_dim) self.intermediate_layers = intermediate_layers self.use_conditioning = True if ctc_softmax is not None else False if self.use_conditioning: self.ctc_softmax = ctc_softmax self.conditioning_layer = torch.nn.Linear( conditioning_layer_dim, attention_dim ) self.enable_fusion = enable_fusion self.fusion_type = fusion_type if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): raise NotImplementedError if self.fusion_type == 'daf_1d': self.fusion_model = DAF() elif self.fusion_type == 'aff_1d': self.fusion_model = AFF(channels=attention_dim, type='1D') elif self.fusion_type == 'iaff_1d': self.fusion_model = iAFF(channels=attention_dim, type='1D') elif (self.enable_fusion) and (self.fusion_type in ['attnpool_1d']):
self.attnpool = AttentionPool1d(max_seq_len, attention_dim, attention_heads)
17
2023-11-25 02:38:32+00:00
24k
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)" }, { "identifier": "build_roi_extractor", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_roi_extractor(cfg):\n \"\"\"Build roi extractor.\"\"\"\n return ROI_EXTRACTORS.build(cfg)" }, { "identifier": "build_loss", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)" }, { "identifier": "StandardRoIHead", "path": "PointOBB/mmdet/models/roi_heads/standard_roi_head.py", "snippet": "class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler.\"\"\"\n self.bbox_assigner = None\n self.bbox_sampler = None\n if self.train_cfg:\n self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n self.bbox_sampler = build_sampler(\n self.train_cfg.sampler, context=self)\n\n def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n \"\"\"Initialize ``bbox_head``\"\"\"\n self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)\n self.bbox_head = build_head(bbox_head)\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize ``mask_head``\"\"\"\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = build_head(mask_head)\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_results = self._bbox_forward(x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_results = self._mask_forward(x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n ann_weight,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n 
gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n losses = dict()\n # bbox head forward and loss\n if self.with_bbox:\n bbox_results = self._bbox_forward_train(x, sampling_results,\n gt_bboxes, gt_labels,ann_weight, #add by fei\n img_metas)\n losses.update(bbox_results['loss_bbox'])\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(x, sampling_results,\n bbox_results['bbox_feats'],\n gt_masks, img_metas)\n losses.update(mask_results['loss_mask'])\n\n return losses\n\n def _bbox_forward(self, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, ann_weight,\n img_metas):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(x, rois)\n\n bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels,ann_weight, self.train_cfg) ## add by fei\n loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(loss_bbox=loss_bbox)\n return bbox_results\n\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n img_metas):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(x, pos_rois)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n\n mask_results = self._mask_forward(\n x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n self.train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n return mask_results\n\n def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n assert ((rois is not None) ^\n (pos_inds is not None and bbox_feats is not None))\n if rois is not None:\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n assert bbox_feats is not None\n mask_feats = 
bbox_feats[pos_inds]\n\n mask_pred = self.mask_head(mask_feats)\n mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n return mask_results\n\n async def async_simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get('mask'))\n return bbox_results, segm_results\n\n def simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head.num_classes)\n for i in range(len(det_bboxes))\n ]\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return list(zip(bbox_results, segm_results))\n\n def aug_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, img_metas, det_bboxes,\n det_labels)\n return [(bbox_results, segm_results)]\n else:\n return [bbox_results]\n\n def onnx_export(self, x, proposals, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n det_bboxes, det_labels = self.bbox_onnx_export(\n x, img_metas, proposals, self.test_cfg, rescale=rescale)\n\n if not self.with_mask:\n return det_bboxes, det_labels\n else:\n segm_results = self.mask_onnx_export(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return det_bboxes, det_labels, segm_results\n\n def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n \"\"\"Export mask branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n det_bboxes (Tensor): Bboxes and corresponding scores.\n has shape [N, num_bboxes, 5].\n det_labels (Tensor): class labels of\n shape [N, num_bboxes].\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # image shapes of images in the batch\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n raise RuntimeError('[ONNX Error] Can not record MaskHead '\n 'as it has not been executed this time')\n batch_size = det_bboxes.size(0)\n # if det_bboxes is rescaled to the original image size, we need to\n 
# rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n batch_index = torch.arange(\n det_bboxes.size(0), device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n max_shape = img_metas[0]['img_shape_for_onnx']\n num_det = det_bboxes.shape[1]\n det_bboxes = det_bboxes.reshape(-1, 4)\n det_labels = det_labels.reshape(-1)\n segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n det_labels, self.test_cfg,\n max_shape)\n segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n max_shape[1])\n return segm_results\n\n def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,\n **kwargs):\n \"\"\"Export bbox branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (Tensor): Region proposals with\n batch dimension, has shape [N, num_bboxes, 5].\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n assert len(\n img_metas\n ) == 1, 'Only support one input image while in exporting to ONNX'\n img_shapes = img_metas[0]['img_shape_for_onnx']\n\n rois = proposals\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n det_bboxes, det_labels = self.bbox_head.onnx_export(\n rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)\n\n return det_bboxes, det_labels" }, { "identifier": "CascadeRoIHead", "path": "PointOBB/mmdet/models/roi_heads/cascade_roi_head.py", "snippet": "class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Cascade roi head including one bbox head and one mask head.\n\n https://arxiv.org/abs/1712.00726\n \"\"\"\n\n def __init__(self,\n num_stages,\n stage_loss_weights,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n shared_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n assert shared_head is None, \\\n 'Shared head is not supported in Cascade RCNN anymore'\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n super(CascadeRoIHead, self).__init__(\n bbox_roi_extractor=bbox_roi_extractor,\n bbox_head=bbox_head,\n mask_roi_extractor=mask_roi_extractor,\n mask_head=mask_head,\n shared_head=shared_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n\n def init_bbox_head(self, 
bbox_roi_extractor, bbox_head):\n \"\"\"Initialize box head and box roi extractor.\n\n Args:\n bbox_roi_extractor (dict): Config of box roi extractor.\n bbox_head (dict): Config of box in box head.\n \"\"\"\n self.bbox_roi_extractor = ModuleList()\n self.bbox_head = ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(self.num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(self.num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))\n self.bbox_head.append(build_head(head))\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize mask head and mask roi extractor.\n\n Args:\n mask_roi_extractor (dict): Config of mask roi extractor.\n mask_head (dict): Config of mask in mask head.\n \"\"\"\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask heads\n if self.with_mask:\n mask_rois = rois[:100]\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def _bbox_forward(self, stage, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(stage, x, rois)\n bbox_targets = self.bbox_head[stage].get_targets(\n sampling_results, gt_bboxes, 
gt_labels, rcnn_train_cfg)\n loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(\n loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n return bbox_results\n\n def _mask_forward(self, stage, x, rois):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_pred = mask_head(mask_feats)\n\n mask_results = dict(mask_pred=mask_pred)\n return mask_results\n\n def _mask_forward_train(self,\n stage,\n x,\n sampling_results,\n gt_masks,\n rcnn_train_cfg,\n bbox_feats=None):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(stage, x, pos_rois)\n\n mask_targets = self.mask_head[stage].get_targets(\n sampling_results, gt_masks, rcnn_train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask)\n return mask_results\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n losses = dict()\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg[i]\n lw = self.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = self.bbox_assigner[i]\n bbox_sampler = self.bbox_sampler[i]\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_results = self._bbox_forward_train(i, x, sampling_results,\n gt_bboxes, gt_labels,\n rcnn_train_cfg)\n\n for name, value in bbox_results['loss_bbox'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(\n i, 
x, sampling_results, gt_masks, rcnn_train_cfg,\n bbox_results['bbox_feats'])\n for name, value in mask_results['loss_mask'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n roi_labels = bbox_results['bbox_targets'][0]\n with torch.no_grad():\n roi_labels = torch.where(\n roi_labels == self.bbox_head[i].num_classes,\n bbox_results['cls_score'][:, :-1].argmax(1),\n roi_labels)\n proposal_list = self.bbox_head[i].refine_bboxes(\n bbox_results['rois'], roi_labels,\n bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n return losses\n\n def simple_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n num_imgs = len(proposal_list)\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n ms_scores = []\n rcnn_test_cfg = self.test_cfg\n\n rois = bbox2roi(proposal_list)\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n\n # split batch bbox prediction back to each image\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n num_proposals_per_img = tuple(\n len(proposals) for proposals in proposal_list)\n rois = rois.split(num_proposals_per_img, 0)\n cls_score = cls_score.split(num_proposals_per_img, 0)\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n else:\n bbox_pred = self.bbox_head[i].bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n ms_scores.append(cls_score)\n\n if i < self.num_stages - 1:\n bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]\n rois = torch.cat([\n self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],\n bbox_pred[j],\n img_metas[j])\n for j in range(num_imgs)\n ])\n\n # average scores of each image by stages\n cls_score = [\n sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n for i in range(num_imgs)\n ]\n\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(num_imgs):\n det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n rois[i],\n cls_score[i],\n bbox_pred[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n\n if torch.onnx.is_in_onnx_export():\n return det_bboxes, det_labels\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head[-1].num_classes)\n for i in range(num_imgs)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n\n if self.with_mask:\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n mask_classes = self.mask_head[-1].num_classes\n segm_results = [[[] for _ in range(mask_classes)]\n for _ in range(num_imgs)]\n else:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n mask_rois = bbox2roi(_bboxes)\n num_mask_rois_per_img = tuple(\n _bbox.size(0) for _bbox in _bboxes)\n aug_masks = []\n for i 
in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n mask_pred = mask_results['mask_pred']\n # split batch mask prediction back to each image\n mask_pred = mask_pred.split(num_mask_rois_per_img, 0)\n aug_masks.append(\n [m.sigmoid().cpu().numpy() for m in mask_pred])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(num_imgs):\n if det_bboxes[i].shape[0] == 0:\n segm_results.append(\n [[]\n for _ in range(self.mask_head[-1].num_classes)])\n else:\n aug_mask = [mask[i] for mask in aug_masks]\n merged_masks = merge_aug_masks(\n aug_mask, [[img_metas[i]]] * self.num_stages,\n rcnn_test_cfg)\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks, _bboxes[i], det_labels[i],\n rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n rescale)\n segm_results.append(segm_result)\n ms_segm_result['ensemble'] = segm_results\n\n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n\n return results\n\n def aug_test(self, features, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n rcnn_test_cfg = self.test_cfg\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(features, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction)\n # \"ms\" in variable names means multi-stage\n ms_scores = []\n\n rois = bbox2roi([proposals])\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n ms_scores.append(bbox_results['cls_score'])\n\n if i < self.num_stages - 1:\n bbox_label = bbox_results['cls_score'][:, :-1].argmax(\n dim=1)\n rois = self.bbox_head[i].regress_by_class(\n rois, bbox_label, bbox_results['bbox_pred'],\n img_meta[0])\n\n cls_score = sum(ms_scores) / float(len(ms_scores))\n bboxes, scores = self.bbox_head[-1].get_bboxes(\n rois,\n cls_score,\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n\n bbox_result = bbox2result(det_bboxes, det_labels,\n self.bbox_head[-1].num_classes)\n\n if self.with_mask:\n if det_bboxes.shape[0] == 0:\n segm_result = [[]\n for _ in range(self.mask_head[-1].num_classes)]\n else:\n aug_masks = []\n aug_img_metas = []\n for x, img_meta in zip(features, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction)\n mask_rois = bbox2roi([_bboxes])\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n aug_img_metas.append(img_meta)\n merged_masks = merge_aug_masks(aug_masks, 
aug_img_metas,\n self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n rcnn_test_cfg,\n ori_shape,\n scale_factor=1.0,\n rescale=False)\n return [(bbox_result, segm_result)]\n else:\n return [bbox_result]" }, { "identifier": "BBoxTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class BBoxTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False,\n **kwargs):\n \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n if self.with_shared_head:\n roi_feats = self.shared_head(roi_feats)\n sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n async with completed(\n __name__, 'bbox_head_forward',\n sleep_interval=sleep_interval):\n cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n img_shape = img_metas[0]['img_shape']\n scale_factor = img_metas[0]['scale_factor']\n det_bboxes, det_labels = self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n return det_bboxes, det_labels\n\n def simple_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n \"\"\"Test only det bboxes without augmentation.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (List[Tensor]): Region proposals.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n\n Returns:\n tuple[list[Tensor], list[Tensor]]: The first list contains\n the boxes of the corresponding image in a batch, each\n tensor has the shape (num_boxes, 5) and last dimension\n 5 represent (tl_x, tl_y, br_x, br_y, score). 
Each Tensor\n in the second list is the labels with shape (num_boxes, ).\n The length of both lists should be equal to batch_size.\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n max_size = max([proposal.size(0) for proposal in proposals])\n # padding to form a batch\n for i, proposal in enumerate(proposals):\n supplement = proposal.new_full(\n (max_size - proposal.size(0), proposal.size(1)), 0)\n proposals[i] = torch.cat((supplement, proposal), dim=0)\n rois = torch.stack(proposals, dim=0)\n\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n # remove padding, ignore batch_index when calculating mask\n supplement_mask = rois.abs()[..., 1:].sum(dim=-1) == 0\n cls_score[supplement_mask, :] = 0\n\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n # the bbox prediction of some detectors like SABL is not Tensor\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.reshape(batch_size,\n num_proposals_per_img,\n bbox_pred.size(-1))\n bbox_pred[supplement_mask, :] = 0\n else:\n # TODO: Looking forward to a better way\n # TODO move these special process to a corresponding head\n # For SABL\n bbox_preds = self.bbox_head.bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(len(proposals)):\n # remove padding\n supplement_mask = proposals[i].abs().sum(dim=-1) == 0\n for bbox in bbox_preds[i]:\n bbox[supplement_mask] = 0\n det_bbox, det_label = self.bbox_head.get_bboxes(\n rois[i],\n cls_score[i],\n bbox_preds[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n return det_bboxes, det_labels\n else:\n bbox_pred = None\n\n return self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shapes,\n scale_factors,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n \"\"\"Test det bboxes with test time augmentation.\"\"\"\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n # TODO more flexible\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n rois = bbox2roi([proposals])\n bbox_results = self._bbox_forward(x, rois)\n bboxes, scores = self.bbox_head.get_bboxes(\n 
rois,\n bbox_results['cls_score'],\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n return det_bboxes, det_labels" }, { "identifier": "MaskTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class MaskTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False,\n mask_test_cfg=None):\n \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n # image shape of the first image in the batch (only one)\n ori_shape = img_metas[0]['ori_shape']\n scale_factor = img_metas[0]['scale_factor']\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n if rescale:\n scale_factor = det_bboxes.new_tensor(scale_factor)\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n mask_feats = self.mask_roi_extractor(\n x[:len(self.mask_roi_extractor.featmap_strides)],\n mask_rois)\n\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\n sleep_interval = mask_test_cfg['async_sleep_interval']\n else:\n sleep_interval = 0.035\n async with completed(\n __name__,\n 'mask_head_forward',\n sleep_interval=sleep_interval):\n mask_pred = self.mask_head(mask_feats)\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n scale_factor, rescale)\n return segm_result\n\n def simple_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False):\n \"\"\"Simple test for mask head without augmentation.\"\"\"\n # image shapes of images in the batch\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n for _ in range(len(det_bboxes))]\n return segm_results\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n\n # padding to form a batch\n max_size = max([bboxes.size(0) for bboxes in det_bboxes])\n for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):\n supplement_bbox = bbox.new_full(\n (max_size - bbox.size(0), bbox.size(1)), 0)\n supplement_label = label.new_full((max_size - label.size(0), ), 0)\n det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)\n det_labels[i] = torch.cat((supplement_label, label), dim=0)\n det_bboxes = torch.stack(det_bboxes, dim=0)\n det_labels = torch.stack(det_labels, dim=0)\n\n batch_size = det_bboxes.size(0)\n num_proposals_per_img = det_bboxes.shape[1]\n\n # if det_bboxes is rescaled to the original image size, we need to\n # rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n if rescale:\n scale_factors = det_bboxes.new_tensor(scale_factors)\n det_bboxes = det_bboxes * scale_factors.unsqueeze(1)\n\n batch_index = torch.arange(\n det_bboxes.size(0), 
device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n\n # Recover the batch dimension\n mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,\n *mask_pred.shape[1:])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(batch_size):\n mask_pred = mask_preds[i]\n det_bbox = det_bboxes[i]\n det_label = det_labels[i]\n\n # remove padding\n supplement_mask = det_bbox.abs().sum(dim=-1) != 0\n mask_pred = mask_pred[supplement_mask]\n det_bbox = det_bbox[supplement_mask]\n det_label = det_label[supplement_mask]\n\n if det_label.shape[0] == 0:\n segm_results.append([[]\n for _ in range(self.mask_head.num_classes)\n ])\n else:\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, det_bbox, det_label, self.test_cfg,\n ori_shapes[i], scale_factors[i], rescale)\n segm_results.append(segm_result)\n return segm_results\n\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n \"\"\"Test for mask head with test time augmentation.\"\"\"\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n aug_masks = []\n for x, img_meta in zip(feats, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n mask_rois = bbox2roi([_bboxes])\n mask_results = self._mask_forward(x, mask_rois)\n # convert to numpy array to save memory\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n scale_factor = det_bboxes.new_ones(4)\n segm_result = self.mask_head.get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n self.test_cfg,\n ori_shape,\n scale_factor=scale_factor,\n rescale=False)\n return segm_result" }, { "identifier": "obb2xyxy", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2xyxy(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to horizontal bounding boxes.\n\n Args:\n obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]\n \"\"\"\n if version == 'oc':\n results = obb2xyxy_oc(rbboxes)\n elif version == 'le135':\n results = obb2xyxy_le135(rbboxes)\n elif version == 'le90':\n results = obb2xyxy_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" }, { "identifier": "regularize_boxes", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def regularize_boxes(boxes,\n pattern: str = None,\n width_longer: bool = True,\n start_angle: float = -90) -> Tensor:\n \"\"\"Regularize rotated boxes.\n\n Due to the angle periodicity, one rotated box can be represented in\n many different (x, y, w, h, t). To make each rotated box unique,\n ``regularize_boxes`` will take the remainder of the angle divided by\n 180 degrees.\n\n However, after taking the remainder of the angle, there are still two\n representations for one rotate box. For example, (0, 0, 4, 5, 0.5) and\n (0, 0, 5, 4, 0.5 + pi/2) are the same areas in the image. 
To solve the\n problem, the code will swap edges w.r.t ``width_longer``:\n\n - width_longer=True: Make sure the width is longer than the height. If\n not, swap the width and height. The angle ranges in [start_angle,\n start_angle + 180). For the above example, the rotated box will be\n represented as (0, 0, 5, 4, 0.5 + pi/2).\n - width_longer=False: Make sure the angle is lower than\n start_angle+pi/2. If not, swap the width and height. The angle\n ranges in [start_angle, start_angle + 90). For the above example,\n the rotated box will be represented as (0, 0, 4, 5, 0.5).\n\n For convenience, three commonly used patterns are preset in\n ``regualrize_boxes``:\n\n - 'oc': OpenCV Definition. Has the same box representation as\n ``cv2.minAreaRect`` the angle ranges in [-90, 0). Equal to set\n width_longer=False and start_angle=-90.\n - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-90.\n - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-45.\n\n Args:\n pattern (str, Optional): Regularization pattern. Can only be 'oc',\n 'le90', or 'le135'. Defaults to None.\n width_longer (bool): Whether to make sure width is larger than\n height. Defaults to True.\n start_angle (float): The starting angle of the box angle\n represented in degrees. Defaults to -90.\n\n Returns:\n Tensor: Regularized box tensor.\n \"\"\"\n\n if pattern is not None:\n if pattern == 'oc':\n width_longer, start_angle = False, -90\n elif pattern == 'le90':\n width_longer, start_angle = True, -90\n elif pattern == 'le135':\n width_longer, start_angle = True, -45\n else:\n raise ValueError(\"pattern only can be 'oc', 'le90', and\"\n f\"'le135', but get {pattern}.\")\n start_angle = start_angle / 180 * np.pi\n\n x, y, w, h, t = boxes.unbind(dim=-1)\n if width_longer:\n # swap edge and angle if h >= w\n w_ = torch.where(w > h, w, h)\n h_ = torch.where(w > h, h, w)\n t = torch.where(w > h, t, t + np.pi / 2)\n t = ((t - start_angle) % np.pi) + start_angle\n else:\n # swap edge and angle if angle > pi/2\n t = ((t - start_angle) % np.pi)\n w_ = torch.where(t < np.pi / 2, w, h)\n h_ = torch.where(t < np.pi / 2, h, w)\n t = torch.where(t < np.pi / 2, t, t - np.pi / 2) + start_angle\n obb = torch.stack([x, y, w_, h_, t], dim=-1)\n return obb" }, { "identifier": "reduce_mean", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" } ]
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
15,221
"""Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module()
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1) @HEADS.register_module() class PSCCoder(BaseBBoxCoder): """Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. 
Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module()
class PointOBBHead(StandardRoIHead):
5
2023-11-20 07:50:12+00:00
24k
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_size=4096,\n num_hidden_layers=32,\n num_attention_heads=32,\n emb_dropout_prob=0.0,\n attn_dropout_prob=0.0,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n max_position_embeddings=8192,\n scale_attn_weights=True,\n use_cache=True,\n bf16=False,\n fp16=False,\n fp32=False,\n kv_channels=128,\n rotary_pct=1.0,\n rotary_emb_base=10000,\n use_dynamic_ntk=True,\n use_logn_attn=True,\n use_flash_attn=\"auto\",\n intermediate_size=22016,\n no_bias=True,\n tie_word_embeddings=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.emb_dropout_prob = emb_dropout_prob\n self.attn_dropout_prob = attn_dropout_prob\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.max_position_embeddings = max_position_embeddings\n self.bf16 = bf16\n self.fp16 = fp16\n self.fp32 = fp32\n self.kv_channels = kv_channels\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.use_dynamic_ntk = use_dynamic_ntk\n self.use_logn_attn = use_logn_attn\n self.use_flash_attn = use_flash_attn\n self.no_bias = no_bias\n super().__init__(\n tie_word_embeddings=tie_word_embeddings,\n **kwargs\n )" }, { "identifier": "make_context", "path": "llm/models/hf_models/qwen_vl/qwen_generation_utils.py", "snippet": "def make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n if history is None:\n history = []\n\n if chat_format == \"chatml\":\n im_start, im_end = \"<|im_start|>\", \"<|im_end|>\"\n im_start_tokens = [tokenizer.im_start_id]\n im_end_tokens = [tokenizer.im_end_id]\n nl_tokens = tokenizer.encode(\"\\n\")\n\n def _tokenize_str(role, content):\n return f\"{role}\\n{content}\", tokenizer.encode(\n role, allowed_special=set(tokenizer.IMAGE_ST)\n ) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))\n\n system_text, system_tokens_part = _tokenize_str(\"system\", system)\n system_tokens = im_start_tokens + system_tokens_part + im_end_tokens\n\n raw_text = \"\"\n context_tokens = []\n\n for turn_query, turn_response in reversed(history):\n query_text, query_tokens_part = _tokenize_str(\"user\", turn_query)\n query_tokens = im_start_tokens + query_tokens_part + im_end_tokens\n if turn_response is not None:\n response_text, response_tokens_part = _tokenize_str(\n \"assistant\", turn_response\n )\n response_tokens = im_start_tokens + response_tokens_part + im_end_tokens\n\n next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens\n prev_chat = (\n f\"\\n{im_start}{query_text}{im_end}\\n{im_start}{response_text}{im_end}\"\n )\n else:\n next_context_tokens = nl_tokens + query_tokens + nl_tokens\n prev_chat = f\"\\n{im_start}{query_text}{im_end}\\n\"\n\n current_context_size = (\n len(system_tokens) + len(next_context_tokens) + len(context_tokens)\n )\n if current_context_size < max_window_size:\n context_tokens = next_context_tokens + context_tokens\n 
raw_text = prev_chat + raw_text\n else:\n break\n\n context_tokens = system_tokens + context_tokens\n raw_text = f\"{im_start}{system_text}{im_end}\" + raw_text\n context_tokens += (\n nl_tokens\n + im_start_tokens\n + _tokenize_str(\"user\", query)[1]\n + im_end_tokens\n + nl_tokens\n + im_start_tokens\n + tokenizer.encode(\"assistant\")\n + nl_tokens\n )\n raw_text += f\"\\n{im_start}user\\n{query}{im_end}\\n{im_start}assistant\\n\"\n\n elif chat_format == \"raw\":\n raw_text = query\n context_tokens = tokenizer.encode(raw_text)\n else:\n raise NotImplementedError(f\"Unknown chat format {chat_format!r}\")\n\n return raw_text, context_tokens" }, { "identifier": "HistoryType", "path": "llm/models/hf_models/qwen/qwen_generation_utils.py", "snippet": "def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:\ndef get_ltor_masks_and_position_ids(\n data,\n eod_token,\n reset_position_ids,\n reset_attention_mask,\n eod_mask_loss,\n):\ndef get_batch(context_tokens: torch.LongTensor, eod_id: int):\ndef get_stop_words_ids(chat_format, tokenizer):\ndef make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n def _tokenize_str(role, content):\ndef _decode_default(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_words: List[str],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace',\n):\ndef _decode_chatml(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_token_ids: List[int],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace'\n):\ndef decode_tokens(\n tokens: Union[torch.LongTensor, TokensType],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n chat_format: str,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = \"replace\",\n) -> str:\n def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor\n ) -> torch.FloatTensor:\n def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:\n def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:\ndef top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float(\"Inf\")):\ndef switch(val1, val2, boolean):\nclass StopWordsLogitsProcessor(LogitsProcessor):" }, { "identifier": "VisionTransformer", "path": "llm/models/hf_models/qwen_vl/visual.py", "snippet": "class VisionTransformer(nn.Module):\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n n_queries: int = 256,\n output_dim: int = 512,\n **kwargs\n ):\n super().__init__()\n image_height, image_width = self.image_size = (image_size, image_size)\n patch_height, patch_width = self.patch_size = (patch_size, patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n mean = (0.48145466, 0.4578275, 0.40821073)\n std = (0.26862954, 0.26130258, 0.27577711)\n self.image_transform = transforms.Compose([\n transforms.Resize(\n (image_size, image_size),\n interpolation=InterpolationMode.BICUBIC\n ),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n\n self.conv1 = 
nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))\n\n norm_layer = partial(nn.LayerNorm, eps=1e-6)\n act_layer = nn.GELU\n\n self.ln_pre = norm_layer(width)\n self.transformer = TransformerBlock(\n width,\n layers,\n heads,\n mlp_ratio,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n\n self.attn_pool = Resampler(\n grid_size=int(math.sqrt(n_queries)),\n embed_dim=output_dim,\n num_heads=output_dim // 128,\n kv_dim=width,\n norm_layer=norm_layer,\n )\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter((output_dim ** -0.5) * torch.randn(output_dim, output_dim))\n\n def forward(self, x: torch.Tensor):\n x = x.to(\n dtype=self.transformer.get_cast_dtype(),\n device=self.transformer.get_cast_device(),\n )\n # to patches\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n x = x + get_abs_pos(self.positional_embedding, x.size(1))\n\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.attn_pool(x)\n x = self.ln_post(x)\n x = x @ self.proj\n\n return x\n\n def encode(self, image_paths: List[str]):\n images = []\n for image_path in image_paths:\n if image_path.startswith(\"http://\") or image_path.startswith(\"https://\"):\n image = Image.open(requests.get(image_path, stream=True).raw)\n else:\n image = Image.open(image_path)\n image = image.convert(\"RGB\")\n images.append(self.image_transform(image))\n images = torch.stack(images, dim=0)\n return self(images)" }, { "identifier": "RMSNorm", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class RMSNorm(torch.nn.Module):\n def __init__(self, dim: int, eps: float = 1e-6):\n super().__init__()\n self.eps = eps\n self.weight = nn.Parameter(torch.ones(dim))\n\n def _norm(self, x):\n return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n def forward(self, x):\n if rms_norm is not None and x.is_cuda:\n return rms_norm(x, self.weight, self.eps)\n else:\n output = self._norm(x.float()).type_as(x)\n return output * self.weight" }, { "identifier": "apply_rotary_pos_emb", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "def apply_rotary_pos_emb(t, freqs):\n cos, sin = freqs\n if apply_rotary_emb_func is not None and t.is_cuda:\n t_ = t.float()\n cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]\n sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]\n output = apply_rotary_emb_func(t_, cos, sin).type_as(t)\n return output\n else:\n rot_dim = freqs[0].shape[-1]\n cos, sin = freqs\n t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]\n t_ = t_.float()\n t_pass_ = t_pass_.float()\n t_ = (t_ * cos) + (_rotate_half(t_) * sin)\n return torch.cat((t_, t_pass_), dim=-1).type_as(t)" }, { "identifier": "QWenMLP", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.w1 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n self.w2 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n ff_dim_in = config.intermediate_size // 2\n self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not 
config.no_bias)\n\n def forward(self, hidden_states):\n a1 = self.w1(hidden_states)\n a2 = self.w2(hidden_states)\n intermediate_parallel = a1 * F.silu(a2)\n output = self.c_proj(intermediate_parallel)\n return output" }, { "identifier": "QWenAttention", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4), persistent=False)\n self.seq_length = config.seq_length\n\n self.hidden_size = config.hidden_size\n self.split_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n\n self.use_flash_attn = config.use_flash_attn\n self.scale_attn_weights = True\n\n self.projection_size = config.kv_channels * config.num_attention_heads\n\n assert self.projection_size % config.num_attention_heads == 0\n self.hidden_size_per_attention_head = (\n self.projection_size // config.num_attention_heads\n )\n\n self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)\n\n self.c_proj = nn.Linear(\n config.hidden_size, self.projection_size, bias=not config.no_bias\n )\n\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n self.core_attention_flash = FlashSelfAttention(\n causal=True, attention_dropout=config.attn_dropout_prob\n )\n self.bf16 = config.bf16\n\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.use_logn_attn = config.use_logn_attn\n\n logn_list = [\n math.log(i, self.seq_length) if i > self.seq_length else 1\n for i in range(1, 32768)\n ]\n logn_tensor = torch.tensor(logn_list)[None, :, None, None]\n self.register_buffer(\"logn_tensor\", logn_tensor, persistent=False)\n\n self.attn_dropout = nn.Dropout(config.attn_dropout_prob)\n self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False\n self.use_cache_quantization = config.use_cache_quantization if hasattr(\n config, 'use_cache_quantization') else False\n self.use_cache_kernel = config.use_cache_kernel if hasattr(config, 'use_cache_kernel') else False\n cache_dtype = torch.float\n if self.bf16:\n cache_dtype = torch.bfloat16\n elif config.fp16:\n cache_dtype = torch.float16\n self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)\n self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)\n\n if config.use_cache_quantization and config.use_cache_kernel:\n try:\n from .cpp_kernels import cache_autogptq_cuda_256\n self.cache_kernels = cache_autogptq_cuda_256\n except ImportError:\n self.cache_kernels = None\n\n def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):\n device = query.device\n if self.use_cache_quantization:\n qk, qk_scale, qk_zero = key\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = query.shape[:-1] + (qk.shape[-2],)\n attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_faster_old(\n query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),\n qk.transpose(-1, -2).contiguous(),\n attn_weights,\n qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),\n qk_zero.contiguous()if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())\n # attn_weights = 
attn_weights.to(query.dtype).contiguous()\n else:\n key = dequantize_cache_torch(qk, qk_scale, qk_zero)\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n else:\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n\n if self.scale_attn_weights:\n if self.use_cache_quantization:\n size_temp = value[0].size(-1)\n else:\n size_temp = value.size(-1)\n attn_weights = attn_weights / torch.full(\n [],\n size_temp ** 0.5,\n dtype=attn_weights.dtype,\n device=attn_weights.device,\n )\n if self.use_cache_quantization:\n query_length, key_length = query.size(-2), key[0].size(-2)\n else:\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(\n attn_weights.device\n )\n attn_weights = torch.where(\n causal_mask, attn_weights.to(attn_weights.dtype), mask_value\n )\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n if self.softmax_in_fp32:\n attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)\n else:\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n attn_weights = attn_weights.type(query.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n if self.use_cache_quantization:\n qv, qv_scale, qv_zero = value\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = attn_weights.shape[:-1] + (query.shape[-1],)\n attn_output = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(\n attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),\n qv.contiguous(), # dtype: int32\n attn_output,\n qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),\n qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())\n if attn_output.dtype != query.dtype:\n attn_output = attn_output.to(query.dtype)\n attn_weights = attn_weights.to(query.dtype)\n else:\n value = dequantize_cache_torch(qv, qv_scale, qv_zero)\n attn_output = torch.matmul(attn_weights, value)\n else:\n attn_output = torch.matmul(attn_weights, value)\n\n attn_output = attn_output.transpose(1, 2)\n\n return attn_output, attn_weights\n\n def _upcast_and_reordered_attn(\n self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None\n ):\n bsz, num_heads, q_seq_len, dk = query.size()\n _, _, k_seq_len, _ = key.size()\n\n attn_weights = torch.empty(\n bsz * num_heads,\n q_seq_len,\n k_seq_len,\n dtype=torch.float32,\n device=query.device,\n )\n\n scale_factor = 1.0\n if self.scale_attn_weights:\n scale_factor /= float(value.size(-1)) ** 0.5\n\n with autocast(enabled=False):\n q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(\n -1, dk, k_seq_len\n )\n attn_weights = torch.baddbmm(\n attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor\n )\n attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)\n\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(\n 
attn_weights.device\n )\n attn_weights = torch.where(causal_mask, attn_weights, mask_value)\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if attn_weights.dtype != torch.float32:\n raise RuntimeError(\n \"Error with upcasting, attn_weights does not have dtype torch.float32\"\n )\n attn_weights = attn_weights.type(value.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n def _split_heads(self, tensor, num_heads, attn_head_size):\n new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n tensor = tensor.view(new_shape)\n return tensor\n\n def _merge_heads(self, tensor, num_heads, attn_head_size):\n tensor = tensor.contiguous()\n new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def forward(\n self,\n hidden_states: Optional[Tuple[torch.FloatTensor]],\n rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,\n registered_causal_mask: Optional[torch.Tensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ):\n mixed_x_layer = self.c_attn(hidden_states)\n\n query, key, value = mixed_x_layer.split(self.split_size, dim=2)\n\n query = self._split_heads(query, self.num_heads, self.head_dim)\n key = self._split_heads(key, self.num_heads, self.head_dim)\n value = self._split_heads(value, self.num_heads, self.head_dim)\n\n if rotary_pos_emb_list is not None:\n cur_len = query.shape[1]\n if len(rotary_pos_emb_list) == 1:\n rotary_pos_emb = rotary_pos_emb_list[0]\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query = apply_rotary_pos_emb(query, q_pos_emb)\n key = apply_rotary_pos_emb(key, k_pos_emb)\n else:\n query_list = []\n key_list = []\n for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query_list += [apply_rotary_pos_emb(query[i:i + 1, :, :], q_pos_emb)]\n key_list += [apply_rotary_pos_emb(key[i:i + 1, :, :], k_pos_emb)]\n query = torch.cat(query_list, dim=0)\n key = torch.cat(key_list, dim=0)\n\n if self.use_cache_quantization:\n key = quantize_cache_v(key.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n value = quantize_cache_v(value.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n\n if layer_past is not None:\n past_key, past_value = layer_past[0], layer_past[1]\n if self.use_cache_quantization:\n # use_cache_quantization:\n # present=((q_key,key_scale,key_zero_point),\n # (q_value,value_scale,value_zero_point))\n key = (torch.cat((past_key[0], key[0]), dim=2),\n torch.cat((past_key[1], key[1]), dim=2),\n torch.cat((past_key[2], key[2]), dim=2))\n value = (torch.cat((past_value[0], value[0]), dim=2),\n torch.cat((past_value[1], value[1]), dim=2),\n 
torch.cat((past_value[2], value[2]), dim=2))\n else:\n # not use_cache_quantization:\n # present=(key,value)\n key = torch.cat((past_key, key), dim=1)\n value = torch.cat((past_value, value), dim=1)\n\n if use_cache:\n present = (key, value)\n else:\n present = None\n\n if self.use_logn_attn and not self.training:\n if self.use_cache_quantization:\n seq_start = key[0].size(2) - query.size(1)\n seq_end = key[0].size(2)\n else:\n seq_start = key.size(1) - query.size(1)\n seq_end = key.size(1)\n logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)\n query = query * logn_tensor.expand_as(query)\n\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and query.is_cuda\n ):\n q, k, v = query, key, value\n attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)\n else:\n query = query.permute(0, 2, 1, 3)\n if not self.use_cache_quantization:\n key = key.permute(0, 2, 1, 3)\n value = value.permute(0, 2, 1, 3)\n if (\n registered_causal_mask is None\n and self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and not query.is_cuda\n ):\n raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)\n\n if not self.use_cache_quantization and SUPPORT_TORCH2:\n causal_mask = registered_causal_mask[\n :, :, key.size(-2) - query.size(-2): key.size(-2), :key.size(-2)\n ]\n if attention_mask is not None:\n attention_mask = attention_mask.expand(\n -1, -1, causal_mask.size(2), -1\n ).masked_fill(~causal_mask, torch.finfo(query.dtype).min)\n else:\n attention_mask = causal_mask\n attn_output = F.scaled_dot_product_attention(\n query, key, value, attn_mask=attention_mask\n ).transpose(1, 2)\n attn_weight = None\n else:\n attn_output, attn_weight = self._attn(\n query, key, value, registered_causal_mask, attention_mask, head_mask\n )\n context_layer = self._merge_heads(\n attn_output, self.num_heads, self.head_dim\n )\n\n attn_output = self.c_proj(context_layer)\n\n outputs = (attn_output, present)\n if output_attentions:\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n raise ValueError(\"Cannot output attentions while using flash-attn\")\n else:\n outputs += (attn_weight,)\n\n return outputs" }, { "identifier": "QWenModel", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [\"attn.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.vocab_size = config.vocab_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embed_dim = config.hidden_size\n self.use_cache_quantization = self.config.use_cache_quantization if hasattr(\n self.config, 'use_cache_quantization') else False\n\n self.gradient_checkpointing = False\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.seq_length = config.seq_length\n\n self.wte = nn.Embedding(self.vocab_size, self.embed_dim)\n\n self.drop = nn.Dropout(config.emb_dropout_prob)\n\n if config.rotary_pct == 1.0:\n self.rotary_ndims = None\n else:\n assert config.rotary_pct < 1\n self.rotary_ndims = int(\n config.kv_channels * config.rotary_pct\n )\n dim = (\n self.rotary_ndims\n if self.rotary_ndims is not None\n else config.kv_channels\n )\n self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)\n\n self.use_flash_attn = config.use_flash_attn\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and 
not self.is_fp32\n ):\n self.registered_causal_mask = None\n else:\n max_positions = config.max_position_embeddings\n self.register_buffer(\n \"registered_causal_mask\",\n torch.tril(\n torch.ones((max_positions, max_positions), dtype=torch.bool)\n ).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n\n self.h = nn.ModuleList(\n [\n QWenBlock(\n config\n )\n for i in range(config.num_hidden_layers)\n ]\n )\n self.ln_f = RMSNorm(\n self.embed_dim,\n eps=config.layer_norm_epsilon,\n )\n\n self.post_init()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def get_ntk_alpha(self, true_seq_len):\n context_value = math.log(true_seq_len / self.seq_length, 2) + 1\n ntk_alpha = 2 ** math.ceil(context_value) - 1\n ntk_alpha = max(ntk_alpha, 1)\n return ntk_alpha\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n if self.use_cache_quantization:\n past_length = past_key_values[0][0][0].size(2)\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n position_ids = torch.arange(\n past_length,\n input_shape[-1] + past_length,\n dtype=torch.long,\n device=device,\n )\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n if attention_mask is not None:\n if batch_size <= 0:\n raise ValueError(\"batch_size has to be defined and > 0\")\n attention_mask = attention_mask.view(batch_size, -1)\n attention_mask = attention_mask[:, None, None, :]\n attention_mask = attention_mask.to(dtype=self.dtype)\n attention_mask = (1.0 - attention_mask) * 
torch.finfo(self.dtype).min\n\n encoder_attention_mask = None\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n hidden_states = inputs_embeds\n\n kv_seq_len = hidden_states.size()[1]\n if past_key_values[0] is not None:\n # past key values[0][0] shape: bs * seq_len * head_num * dim\n if self.use_cache_quantization:\n kv_seq_len += past_key_values[0][0][0].shape[2]\n else:\n kv_seq_len += past_key_values[0][0].shape[1]\n\n if self.training or not self.use_dynamic_ntk:\n ntk_alpha_list = [1.0]\n elif kv_seq_len != hidden_states.size()[1]:\n ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list\n else:\n ntk_alpha_list = []\n if attention_mask is not None and kv_seq_len > self.seq_length:\n true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)\n for i in range(hidden_states.size()[0]):\n true_seq_len = true_seq_lens[i].item()\n ntk_alpha = self.get_ntk_alpha(true_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n else:\n ntk_alpha = self.get_ntk_alpha(kv_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list\n rotary_pos_emb_list = [\n self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list\n ]\n\n hidden_states = self.drop(hidden_states)\n output_shape = input_shape + (hidden_states.size(-1),)\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n rotary_pos_emb_list,\n self.registered_causal_mask,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n rotary_pos_emb_list=rotary_pos_emb_list,\n registered_causal_mask=self.registered_causal_mask,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.ln_f(hidden_states)\n hidden_states = hidden_states.view(output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v for v in [hidden_states, presents, all_hidden_states] if v is not None\n )\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )" }, { "identifier": "QWenLMHeadModel", 
"path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenLMHeadModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.rotary_emb\\.inv_freq\"]\n _keys_to_ignore_on_load_unexpected = [r\"h\\.\\d+\\.attn\\.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n assert (\n config.bf16 + config.fp16 + config.fp32 <= 1\n ), \"Only one of \\\"bf16\\\", \\\"fp16\\\", \\\"fp32\\\" can be true\"\n logger.warn(\n \"Warning: please make sure that you are using the latest codes and checkpoints, \"\n \"especially if you used Qwen-7B before 09.25.2023.\"\n \"请使用最新模型和代码,尤其如果你在9月25日前已经开始使用Qwen-7B,千万注意不要使用错误代码和模型。\"\n )\n\n autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0\n\n if autoset_precision:\n if SUPPORT_BF16:\n logger.warn(\n \"The model is automatically converting to bf16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.bf16 = True\n elif SUPPORT_FP16:\n logger.warn(\n \"The model is automatically converting to fp16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.fp16 = True\n else:\n config.fp32 = True\n\n if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:\n logger.warn(\n \"Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\") # noqa\n if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:\n logger.warn(\n \"Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster\")\n if config.fp32:\n if SUPPORT_BF16:\n logger.warn(\n \"Your device support faster inference by passing bf16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n elif SUPPORT_FP16:\n logger.warn(\n \"Your device support faster inference by passing fp16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n\n if config.use_flash_attn == \"auto\":\n if config.bf16 or config.fp16:\n logger.warn(\"Try importing flash-attention for faster inference...\")\n config.use_flash_attn = True\n else:\n config.use_flash_attn = False\n if config.use_flash_attn and config.fp32:\n logger.warn(\"Flash attention will be disabled because it does NOT support fp32.\")\n\n if config.use_flash_attn:\n _import_flash_attn()\n\n self.transformer = QWenModel(config)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n if config.bf16:\n self.transformer.bfloat16()\n self.lm_head.bfloat16()\n if config.fp16:\n self.transformer.half()\n self.lm_head.half()\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs\n ):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n if past_key_values:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n )\n return model_inputs\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n labels = labels.to(lm_logits.device)\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)\n )\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(\n past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n ) -> Tuple[Tuple[torch.Tensor]]:\n\n return tuple(\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n )\n for layer_past in past_key_values\n )\n\n def chat(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n append_history: bool = True,\n stream: Optional[bool] = _SENTINEL,\n stop_words_ids: Optional[List[List[int]]] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Tuple[str, HistoryType]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = 
[]\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n input_ids = torch.tensor([context_tokens]).to(self.device)\n outputs = self.generate(\n input_ids,\n stop_words_ids=stop_words_ids,\n return_dict_in_generate=False,\n generation_config=generation_config,\n **kwargs,\n )\n\n response = decode_tokens(\n outputs[0],\n tokenizer,\n raw_text_len=len(raw_text),\n context_length=len(context_tokens),\n chat_format=generation_config.chat_format,\n verbose=False,\n errors='replace'\n )\n\n if append_history:\n history.append((query, response))\n\n return response, history\n\n def chat_stream(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n stop_words_ids: Optional[List[List[int]]] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Generator[str, Any, None]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = []\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n input_ids = torch.tensor([context_tokens]).to(self.device)\n\n from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig\n self.__class__.generate_stream = NewGenerationMixin.generate\n self.__class__.sample_stream = NewGenerationMixin.sample_stream\n stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)\n\n def stream_generator():\n outputs = []\n for token in self.generate_stream(\n input_ids,\n return_dict_in_generate=False,\n generation_config=stream_config,\n logits_processor=logits_processor,\n seed=-1,\n **kwargs):\n outputs.append(token.item())\n yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')\n\n return stream_generator()\n\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[\n Callable[[int, torch.Tensor], List[int]]\n ] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = 
None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n # Process stop_words_ids.\n stop_words_ids = kwargs.pop(\"stop_words_ids\", None)\n if stop_words_ids is None and generation_config is not None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n if stop_words_ids is None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n\n return super().generate(\n inputs,\n generation_config=generation_config,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n synced_gpus=synced_gpus,\n assistant_model=assistant_model,\n streamer=streamer,\n **kwargs,\n )" } ]
import importlib import math import torch # noqa import torch.nn.functional as F # noqa import torch.utils.checkpoint # noqa from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator # noqa from torch.cuda.amp import autocast # noqa from torch.nn import CrossEntropyLoss from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList # noqa from transformers.generation.logits_process import LogitsProcessorList # noqa from transformers.generation.streamers import BaseStreamer # noqa from transformers.generation.utils import GenerateOutput # noqa from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel # noqa from transformers.utils import logging from einops import rearrange from torch import nn from .configuration_qwen import QWenConfig # noqa from .qwen_generation_utils import ( make_context, ) # noqa from llm.models.hf_models.qwen.qwen_generation_utils import ( HistoryType, decode_tokens, get_stop_words_ids, ) from .visual import VisionTransformer from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat from einops import rearrange
15,370
tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, )
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

if TYPE_CHECKING:
    from transformers.generation.streamers import BaseStreamer  # only needed for type hints

try:
    from einops import rearrange
except ImportError:
    rearrange = None

SUPPORT_CUDA = torch.cuda.is_available()
SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "qwen"
_CONFIG_FOR_DOC = "QWenConfig"

QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]

_ERROR_BAD_CHAT_FORMAT = """\
We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
"""

_SENTINEL = object()
_ERROR_STREAM_IN_CHAT = """\
Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
"""

apply_rotary_emb_func = None
rms_norm = None


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, )
self.mlp = QWenMLP(config)
6
2023-11-26 10:12:52+00:00
24k
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef beam_search_baseline(data, return_ratio=True):" }, { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if 
data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = 
torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in 
self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in 
after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) 
in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" }, { "identifier": "cross_entropy", "path": "utils_execution.py", "snippet": "def cross_entropy(pred, softmax_idx, truth_1h, num_nodes):\n lsm_pred = torch.log(torch_geometric.utils.softmax(pred, softmax_idx, num_nodes=num_nodes)+1e-9)\n # truth_1h = F.one_hot(truth, num_nodes)\n return (-truth_1h*lsm_pred)" }, { "identifier": "check_edge_index_sorted", "path": "utils_execution.py", "snippet": "def check_edge_index_sorted(ei):\n for i in range(ei.shape[1]-1):\n assert ei[0][i] <= ei[0][i+1]\n if ei[0][i] == ei[0][i+1]:\n assert ei[1][i] < ei[1][i+1]" }, { "identifier": "prepare_constants", "path": "utils_execution.py", "snippet": "def prepare_constants(batch):\n SIZE = batch.num_nodes\n STEPS_SIZE = batch.lengths.max()-1\n return SIZE, STEPS_SIZE" }, { "identifier": "edge_one_hot_encode_pointers", "path": "utils_execution.py", "snippet": "def edge_one_hot_encode_pointers(pred, edge_index):\n pred_ei = torch.stack((torch.arange(pred.shape[0]).to(pred), pred))\n amat = torch_geometric.utils.to_dense_adj(pred_ei)\n return amat[0, edge_index[0], edge_index[1]]" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", 
"snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" } ]
from collections import defaultdict from pprint import pprint from torch_geometric.loader import DataLoader from pytorch_lightning.trainer.supporters import CombinedLoader from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner from hyperparameters import get_hyperparameters from torch_geometric.utils import k_hop_subgraph from datasets._configs import CONFIGS from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes from clrs import Type, Location, Stage import copy import itertools import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch_scatter import torch_geometric import pytorch_lightning as pl
15,341
break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, 
edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(), num_nodes, BEAM_WIDTH)), device=start_route.device) # print('tours took', time.time()-st) # st = time.time() dens_logits_o = torch.full_like(dens_logits, -1e9) arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device) fr = tours[arranged, 0] to = tours[arranged, 1] batch_id = arranged.unsqueeze(1).expand_as(fr) fr = fr.flatten() to = to.flatten() batch_id = batch_id.flatten() dens_logits_o[batch_id, fr, to] = 1e9 edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o) sparse_logits = sparse_logits.to(batch.edge_index.device) assert (edge_index == batch.edge_index).all() output_logits['output']['predecessor_index'] = sparse_logits # print('rest took', time.time()-st) return output_logits def process_TSP_tour(self, batch, output_logits): if self.ensure_permutation == "greedy": return self.process_TSP_tour_greedy(batch, output_logits) return self.process_TSP_tour_BS(batch, output_logits) def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): output_logits = self.process_TSP_tour(batch, output_logits) accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_tour_metrics(output_logits, batch)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
class TSPReasoner(AlgorithmReasoner):
    def __init__(self, spec, data, latent_features, algo_processor,
                 bias=True,
                 use_TF=False,
                 L1_loss=False,
                 global_termination_pool='max', #'predinet',
                 get_attention=False,
                 use_batch_norm=False,
                 transferring=False,
                 timeit=True,
                 double_process=False,
                 **algo_reasoner_kwargs):

        super().__init__(
            spec,
            data,
            latent_features,
            algo_processor,
            use_TF=use_TF,
            timeit=timeit,
            L1_loss=L1_loss,
            global_termination_pool=global_termination_pool,
            get_attention=get_attention,
            use_batch_norm=use_batch_norm,
            transferring=transferring,
            **algo_reasoner_kwargs,
        )
        self.step_idx = 0
        self.assert_checks = False
        self.debug = False
        self.debug_epoch_threshold = 1e9
        self.next_step_pool = True
        self.double_process = double_process
        self.lambda_mul = 1# 0.0001
        self.transferring = transferring

    def get_input_output_hints(self, batch):
        hint_inp_curr = dict()
        hint_out_curr = dict()
        return hint_inp_curr, hint_out_curr

    def process(self, *args, **kwargs):
        self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
            *args,
            first_n_processors=1000 if not self.double_process else 1,
            **kwargs)
        if self.double_process:
            self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
                *args,
                init_last_latent=self.last_latent,
                **kwargs)
        return self.all_hint_logits, self.last_logits, self.all_masks_graph


class LitTSPReasoner(LitAlgorithmReasoner):

    def __init__(self,
                 hidden_dim,
                 algo_processor,
                 dataset_class,
                 dataset_root,
                 dataset_kwargs,
                 bias=True,
                 use_TF=False,
                 ensure_permutation='greedy',
                 transferring=False,
                 learning_rate=get_hyperparameters()['lr'],
                 double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(hidden_dim,
                         algo_processor,
                         dataset_class,
                         dataset_root,
                         dataset_kwargs,
                         bias=bias,
                         use_TF=use_TF,
                         transferring=transferring,
                         learning_rate=learning_rate,
                         **algo_reasoner_kwargs)

        self.algorithm_module = TSPReasoner(self.dataset.spec,
                                            self.dataset[0],
                                            hidden_dim,
                                            algo_processor,
                                            bias=bias,
                                            use_TF=use_TF,
                                            transferring=transferring,
                                            timeit=self.timeit,
                                            double_process=double_process,
                                            **algo_reasoner_kwargs)
        self.ensure_permutation = ensure_permutation
        self.double_process = double_process
        self.save_hyperparameters(ignore=['algo_processor'])

    def training_step(self, batch, batch_idx):
        ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)}
        for bb in batch:
            ans = super().training_step(bb, batch_idx)
            ret['loss'] += ans['loss']
            for name in ['losses_dict', 'accuracies']:
                for k, v in ans[name].items():
                    ret[name][k].append(v)
        ret['loss'] /= len(batch)
        for name in ['losses_dict', 'accuracies']:
            for k, v in ans[name].items():
                ret[name][k] = torch.tensor(v).mean()
        return ret

    def get_tour_metrics(self, output_logits, batch):

        def get_mask(edges):
            mask = torch.zeros_like(batch.edge_index[0])
            j = 0
            for i in range(batch.edge_index.shape[1]):
                u1, v1 = batch.edge_index[:, i]
                u2, v2 = edges[:, j]
                if u1 == u2 and v1 == v2:
                    mask[i] = 1
                    j += 1
                if j == edges.shape[1]:
                    break
            assert j == edges.shape[1]
            return mask

        def get_mask_v2(edges):
            dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool()
            dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool()
            edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1))
            mask = mask - 1
            return mask

        acc = None
        # st = time.time()
        outputs = type(self.algorithm_module).convert_logits_to_outputs(
            self.dataset.spec, output_logits,
            batch.edge_index[0], batch.edge_index[1],
            batch.num_nodes, batch.batch,
            include_probabilities=False)['output']
        for name in outputs:
            pred = outputs[name]
            pred_gt = getattr(batch, name)
            stage, loc, data_type = self.dataset.spec[name]
            if loc == Location.NODE:
                if name == 'predecessor_index':
                    tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred])
                    mask = get_mask_v2(tours).bool()
                    st = time.time()
                    mattr = batch.edge_attr[mask]
                    mbatch = batch.edge_index_batch[mask]
                    msrc, mdst = batch.edge_index[:, mask]
                    tour_len = torch_scatter.scatter_sum(mattr, mbatch)
                    tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch)
                    assert sum(tour_correctness)/len(tour_correctness) == 1
        return dict(tour_len=tour_len.mean(),
                    tour_len_gt=batch.optimal_value.mean().item(),
                    tour_correctness=sum(tour_correctness)/len(tour_correctness),
                    tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean())

    def process_TSP_tour_greedy(self, batch, output_logits):
        mask_active_nodes = torch.tensor(batch.start_route).bool()
        mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool()
        max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max()
        num_nodes_per_graph = batch.num_nodes // batch.num_graphs
        for _ in range(max_nodes_per_graph - 1):
            mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node
            mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations
            sloops = (batch.edge_index[0] == batch.edge_index[1])
            preds = output_logits['output']['predecessor_index'].clone()
            preds = preds.masked_fill(~mask_active_edges | sloops, -1e6)
            # nudge the max value to ensure there is a unique maximum
            max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1)
            max_idxs = F.one_hot(max_idxs, num_nodes_per_graph)
            preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten()
            output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]]
            new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max)
            mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool()
        final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]]
        output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8)
        return output_logits

    def process_TSP_tour_BS(self, batch, output_logits):
        start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0]
        dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch,
                                                         edge_attr=output_logits['output']['predecessor_index'])
        num_nodes = start_route.shape[1]
        # st = time.time()
        tours = torch.tensor(np.array(vmapped_beam_search_rollout(
            start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(),
            num_nodes, BEAM_WIDTH)), device=start_route.device)
        # print('tours took', time.time()-st)
        # st = time.time()
        dens_logits_o = torch.full_like(dens_logits, -1e9)
        arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device)
        fr = tours[arranged, 0]
        to = tours[arranged, 1]
        batch_id = arranged.unsqueeze(1).expand_as(fr)
        fr = fr.flatten()
        to = to.flatten()
        batch_id = batch_id.flatten()
        dens_logits_o[batch_id, fr, to] = 1e9
        edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o)
        sparse_logits = sparse_logits.to(batch.edge_index.device)
        assert (edge_index == batch.edge_index).all()
        output_logits['output']['predecessor_index'] = sparse_logits
        # print('rest took', time.time()-st)
        return output_logits

    def process_TSP_tour(self, batch, output_logits):
        if self.ensure_permutation == "greedy":
            return self.process_TSP_tour_greedy(batch, output_logits)
        return self.process_TSP_tour_BS(batch, output_logits)

    def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph):
        output_logits = self.process_TSP_tour(batch, output_logits)
        accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph)
        accs_dict.update(**self.get_tour_metrics(output_logits, batch))
        return accs_dict

    def load_dataset(self, split, suffix=''):
        split = split+suffix
        nns = get_number_of_nodes(self.algorithm, split)
        for nn in nns:
            self.dataset_kwargs['split'] = split
            if (split, nn) not in self._datasets:
                self._datasets[(split, nn)] = self.dataset_class(
                    self.dataset_root, nn,
CONFIGS[self.algorithm][split]['num_samples'],
4
2023-11-20 15:32:43+00:00
24k
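An illustrative aside on the TSP record above (not part of the dataset record): its tour metrics and greedy decoder both lean on torch_scatter segment reductions over a PyG-style batched graph. The minimal sketch below uses invented toy tensors to show the two patterns — scatter_sum for per-graph tour length over masked edges, and scatter_max as the variable-size-safe per-graph argmax that the code's own NOTE suggests in place of the reshape(-1, num_nodes).argmax(-1) trick.

# Hedged illustrative sketch; toy values only, assumes torch and torch_scatter are installed.
import torch
import torch_scatter

# Edge lengths and the graph id of each edge (playing the roles of batch.edge_attr
# and batch.edge_index_batch in the record's code).
edge_len = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
edge_graph = torch.tensor([0, 0, 0, 1, 1])
# Boolean mask selecting the edges that belong to the decoded tours.
tour_mask = torch.tensor([True, False, True, True, True])

# Per-graph tour length: sum the masked edge lengths, grouped by graph id.
tour_len = torch_scatter.scatter_sum(edge_len[tour_mask], edge_graph[tour_mask])
print(tour_len)  # tensor([4., 9.])

# Per-graph argmax over edge logits; works even when graphs have different edge counts.
edge_logit = torch.tensor([0.1, 0.7, 0.2, 0.9, 0.3])
best_val, best_edge = torch_scatter.scatter_max(edge_logit, edge_graph)
print(best_edge)  # tensor([1, 3]) -- index of the best outgoing edge per graph

scatter_max returns both the per-group maxima and the indices of the winning elements, which is why it sidesteps the equal-size assumption baked into the reshape-based argmax in process_TSP_tour_greedy.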
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "BDD100K_Night", "path": "lib/train/dataset/bdd100k_night.py", "snippet": "class BDD100K_Night(BaseVideoDataset):\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n root = env_settings().bdd100k_dir if root is None else root\n super().__init__('bdd100k_night', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/')\n self.anno_path = os.path.join(root, 'annotations/bdd100k_night.json')\n\n # load dataset\n self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n if not self.anno_path == None:\n print('loading annotations into memory...')\n tic = time.time()\n with open(self.anno_path, 'r') as f:\n dataset = json.load(f)\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.sequence_list = self._get_sequence_list()\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n\n #得到序列\n def _get_sequence_list(self):\n anns = {}\n for picture in self.dataset:\n for box in picture['labels']:\n anns[box['id']] = box\n anns[box['id']]['name'] = picture['name']\n self.anns = anns\n\n #anns对应的是每一个框\n seq_list = list(anns.keys())\n\n return seq_list\n\n def _get_anno(self, seq_id):\n anno = self.anns[self.sequence_list[seq_id]]\n return anno\n\n\n #得到图片帧\n def _get_frames(self, seq_id):\n path = self.anns[self.sequence_list[seq_id]]['name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n #得到每一帧的bounding box\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n x = anno['box2d']['x1']\n y = anno['box2d']['y1']\n width = anno['box2d']['x2'] - anno['box2d']['x1']\n height = anno['box2d']['y2'] - anno['box2d']['y1']\n\n bbox = torch.Tensor([x,y,width,height]).view(1, 4)\n\n '''v0.4 BDD100K_Night avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def is_video_sequence(self):\n return False\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # BDD100K is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] 
for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta\n\n def get_name(self):\n return 'bdd100k_night'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.anns[self.sequence_list[seq_id]]['category']\n object_meta = OrderedDict({'object_class_name': cat_dict_current,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta" }, { "identifier": "SHIFT_Night", "path": "lib/train/dataset/shift_night.py", "snippet": "class SHIFT_Night(BaseVideoDataset):\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n SHIFT_NIGHT Dataset\n \"\"\"\n root = env_settings().shift_dir if root is None else root\n super().__init__('shift_night', root, image_loader)\n\n sequence_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n sequence_path = os.path.join(sequence_path, 'data_specs', 'shift_info_1fps.json')\n with open(sequence_path, 'r') as f:\n info = json.load(f)\n self.info = info\n\n self.sequence_list = self._build_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n def _build_sequence_list(self):\n sequence_list = [sequence for sequence in self.info.keys()]\n return sequence_list\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n video_name = seq_name.split('/')[0]\n return os.path.join(self.root, video_name), seq_name\n\n def _get_frame_path(self, seq_path, seq_name, frame_id):\n frame = self.info[seq_name]['frame'][frame_id]\n return os.path.join(seq_path, frame) # frames extracted from info.json\n\n def _get_frame(self, seq_path, seq_name, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, seq_name, frame_id))\n\n def _read_bb_anno(self, seq_path, seq_name):\n bbox_all = []\n for bbox in self.info[seq_name]['box2d']:\n x = bbox['x1']\n y = bbox['y1']\n width = bbox['x2'] - bbox['x1']\n height = bbox['y2'] - bbox['y1']\n bbox_np = np.array([[x,y,width,height]])\n bbox_all.append(bbox_np)\n bbox_all_np = np.concatenate([bbox for bbox in bbox_all],axis=0)\n return torch.tensor(bbox_all_np)\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path, seq_name)\n\n '''v0.4 Shift avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def get_name(self):\n return 'shift_night'\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n\n frame_list = [self._get_frame(seq_path, seq_name, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': self.info[seq_name]['category'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { 
"identifier": "ExDark", "path": "lib/train/dataset/exdark.py", "snippet": "class ExDark(BaseVideoDataset):\n \"\"\" The ExDark dataset. ExDark is an image dataset. Thus, we treat each image as a sequence of length 1.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the\n images will be used\n split - 'train' or 'val'.\n \"\"\"\n root = env_settings().exdark_dir if root is None else root\n super().__init__('exdark', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/')\n self.anno_path = os.path.join(root, 'annotations/annotations.json')\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'exdark'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''v0.4 ExDark avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 
'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # ExDark is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n use_lmdb - whether the dataset is stored in lmdb format\n \"\"\"\n root = env_settings().got10k_lmdb_dir if root is None else root\n super().__init__('GOT10k_lmdb', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k_lmdb'\n\n def 
has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = 
self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, 
\"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) 
You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" } ]
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
21,048
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb")
datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
8
2023-11-20 06:41:15+00:00
24k
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
14,561
mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) 
log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', } model_VL = VisionLAN(**cfg['args']) model_path = cfg['init_state_dict'] if path is None else path print('load pre_trained VisionLAN model from %s' % model_path) model_VL = model_VL.to(self.device) model_VL = nn.DataParallel(model_VL) if cfg['init_state_dict'] != None: fe_state_dict_ori = torch.load(model_path) fe_state_dict = OrderedDict() for k, v in fe_state_dict_ori.items(): if 'module' not in k: k = 'module.' 
+ k else: k = k.replace('features.module.', 'module.features.') fe_state_dict[k] = v model_dict_fe = model_VL.state_dict() state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()} model_dict_fe.update(state_dict_fe) model_VL.load_state_dict(model_dict_fe) return model_VL def parse_visionlan_data(self, imgs_input): imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB') imgs_input = cv2.resize(np.array(imgs_input), (256, 64)) imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0) imgs_input = imgs_input.to(self.device) return imgs_input def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def on_save_checkpoint(self, checkpoint): if not isinstance(self.cond_stage_model, torch.nn.Identity): self.cond_stage_model.save_state_dict( '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch) @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, 
n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # print(x.shape) # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) # print('weighting',weighting.shape,Ly,Lx) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: # if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) if self.text_prior_enable: c = self.get_additional_cond(xc, c) # c = {'c_concat': [xc], 'c_crossattn': [c]} else: c = xc if bs is not None: if isinstance(c, dict): for k, v in c.items(): c[k] = [v[0][:bs]] else: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] # print('fuck',c.shape) if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape print('decode z shape', z.shape) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") print(ks, stride, uf) fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape print('encode x shape', x.shape) print('ks', ks, 'stride', stride, 'df', df) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) print('encode z shape', z.shape) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def on_validation_start(self) -> None: print(f'******************************in validation {self.current_epoch}') def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) if self.fid_eval and self.current_epoch % 10 == 0: results = self.recognize_sample(batch, N=114514, inpaint=False) rec_image = results['samples'] target = batch[self.first_stage_key] target = rearrange(target, 'b h w c -> b c h w') cond = batch[self.cond_stage_key] cond = rearrange(cond, 'b h w c -> b c h w') if self.visualize: batchlen = rec_image.shape[0] rc = int(math.sqrt(batchlen)) f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) print(len(axs), batchlen, int(math.sqrt(batchlen))) assert len(axs) ** 2 == batchlen for i in range(batchlen): axs[i // rc, i % rc].set_xticklabels([]) axs[i // rc, i % rc].set_yticklabels([]) axs[i // rc, i % rc].set_aspect('equal') axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) 
plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg') PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3]) SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3]) self.log_dict({'PSNR': PSNR, 'SSIM': SSIM}, prog_bar=False, logger=True, on_step=False, on_epoch=True) def shared_step(self, batch, **kwargs): # print('*******************************************************batch',batch['image'].shape) # print('*******************************************************batch',batch['image'].shape) # if hasattr(self, "split_input_params"): # print(self.split_input_params) # else: # print('fuck') x, c = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x, c) if self.recog_loss_enable: HR = batch['image'] HR = rearrange(HR, 'b h w c -> b c h w') HR = HR.to(memory_format=torch.contiguous_format).float() LR = c label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2) label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2) loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100 # 100 loss = loss + loss_recog_distill loss_dict.update({f'loss_recog': loss_recog_distill}) # return loss + loss_recog_distill, loss_dict # # else: return loss, loss_dict def get_additional_cond(self, c, tp): if self.stn: _, ctrl_points_c = self.stn_head(c) c, _ = self.tps(c, ctrl_points_c) if self.standard_text: x_q = torch.empty(1, 2, c.shape[2], c.shape[3]) # prob_lr = torch.empty(1, 25, 37) rec_results = get_string_crnn(tp.permute(1, 0, 2), False) for i in range(c.shape[0]): # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :]) # target = '' # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False) # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length) # s = pred_str_lr[0] # prob_lr = torch.cat([prob_lr, pred_prob], dim=0) s = rec_results[i] if s == "" or type(s) == torch.Tensor: s = "\t" lower_case = s.lower() upper_case = s.upper() i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3])) i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0) i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3])) i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0) i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1) x_q = torch.cat([x_q, i_t_tensor], dim=0) x_q = x_q[1:] # prob_lr = prob_lr[1:] x_q = x_q.to(self.device) # prob_lr = prob_lr.to(self.device) c = torch.cat([c, x_q], dim=1) return {'c_concat': [c], 'c_crossattn': [tp]} def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.text_prior_enable and self.model.conditioning_key == 'hybrid': tp = self.get_learned_conditioning(c) c = self.get_additional_cond(c, tp) else: if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = 
self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) # print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) # print("reducing stride") # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride) fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = 
[torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, 
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
elif isimage(xc):
4
2023-11-20 06:34:21+00:00
24k
microsoft/Project-BayesDAG
src/causica/models/visl.py
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n # Ensure that data and masks are immutable\n if not issparse(self._train_data):\n self._train_data.setflags(write=False)\n self._train_mask.setflags(write=False)\n if test_data is not None and not issparse(test_data):\n self._test_data = cast(np.ndarray, test_data)\n self._test_data.setflags(write=False)\n self._test_mask = cast(np.ndarray, test_mask)\n self._test_mask.setflags(write=False)\n\n if val_data is not None and not issparse(val_data):\n self._val_data = cast(np.ndarray, val_data)\n self._val_mask = cast(np.ndarray, val_mask)\n self._val_data.setflags(write=False)\n self._val_mask.setflags(write=False)\n\n def to_causal(\n self,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]] = None,\n ):\n \"\"\"\n Return the dag version of this dataset.\n \"\"\"\n return CausalDataset(\n train_data=self._train_data,\n train_mask=self._train_mask,\n adjacency_data=adjacency_data,\n subgraph_data=subgraph_data,\n intervention_data=intervention_data,\n counterfactual_data=counterfactual_data,\n val_data=self._val_data,\n val_mask=self._val_mask,\n test_data=self._test_data,\n test_mask=self._test_mask,\n variables=self._variables,\n data_split=self._data_split,\n held_out_interventions=self._held_out_interventions,\n true_posterior=self._true_posterior,\n graph_args=self._graph_args\n )\n\n @property\n def train_data_and_mask(self) -> Tuple[np.ndarray, np.ndarray]:\n # Add to avoid inconsistent type mypy error\n return self._train_data, self._train_mask" }, { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in 
enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. 
This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for 
unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. 
'\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "IModelForCausalInference", "path": "src/causica/models/imodel.py", "snippet": "class IModelForCausalInference(IModel):\n @abstractmethod\n def cate(\n self,\n intervention_idxs: Union[torch.Tensor, np.ndarray],\n intervention_values: Union[torch.Tensor, np.ndarray],\n reference_values: Optional[np.ndarray] = None,\n effect_idxs: Optional[np.ndarray] = None,\n conditioning_idxs: Optional[Union[torch.Tensor, np.ndarray]] = None,\n conditioning_values: Optional[Union[torch.Tensor, np.ndarray]] = None,\n Nsamples_per_graph: int = 1,\n Ngraphs: Optional[int] = 1000,\n most_likely_graph: bool = False,\n fixed_seed: Optional[int] = None,\n ):\n \"\"\"\n Evaluate (optionally conditional) average treatment effect given the learnt causal model.\n \"\"\"\n raise NotImplementedError" }, { "identifier": "to_tensors", "path": "src/causica/utils/helper_functions.py", "snippet": "def to_tensors(\n *arrays: Union[torch.Tensor, np.ndarray], device: torch.device, dtype: torch.dtype = torch.float\n) -> Tuple[torch.Tensor, ...]:\n return tuple(torch.tensor(array, dtype=dtype, device=device) for array in arrays)" }, { "identifier": "save_json", "path": "src/causica/utils/io_utils.py", "snippet": "def save_json(data: Any, path: str) -> None:\n save(data, path, \".json\", partial(json.dump, indent=4, sort_keys=True))" }, { "identifier": "compute_dag_loss", "path": "src/causica/utils/nri_utils.py", "snippet": "def compute_dag_loss(vec, num_nodes):\n \"\"\"\n vec is a n*(n-1) array with the flattened adjacency matrix (without the diag).\n \"\"\"\n dev = vec.device\n adj_matrix = torch.zeros(num_nodes, num_nodes, device=dev)\n mask = (torch.ones(num_nodes, num_nodes, device=dev) - 
torch.eye(num_nodes, device=dev)).to(bool)\n adj_matrix[mask] = vec\n return torch.abs(torch.trace(torch.matrix_exp(adj_matrix * adj_matrix)) - num_nodes)" }, { "identifier": "get_feature_indices_per_node", "path": "src/causica/utils/nri_utils.py", "snippet": "def get_feature_indices_per_node(variables):\n \"\"\"\n Returns a list in which the i-th element is a list with the features indices that correspond to the i-th node.\n For each Variable in 'variables' argument, the node is specified through the group_name field.\n \"\"\"\n nodes = [v.group_name for v in variables]\n nodes_unique = sorted(set(nodes))\n if len(nodes_unique) == len(nodes):\n nodes_unique = nodes\n output = []\n for node in nodes_unique:\n output.append([i for (i, e) in enumerate(nodes) if e == node])\n return output, nodes_unique" }, { "identifier": "kl_categorical", "path": "src/causica/utils/nri_utils.py", "snippet": "def kl_categorical(preds, log_prior, num_atoms, eps=1e-16):\n \"\"\"\n preds: [num_sims, num_edges, num_edge_types]\n log_prior: [1, 1, num_edge_types]\n \"\"\"\n kl_div = preds * (torch.log(preds + eps) - log_prior)\n return kl_div.sum() / (num_atoms * preds.size(0))" }, { "identifier": "piecewise_linear", "path": "src/causica/utils/nri_utils.py", "snippet": "def piecewise_linear(x, start, width, max_val=1):\n \"\"\"\n Piecewise linear function whose value is:\n 0 if x<=start\n max_val if x>=start+width\n grows linearly from 0 to max_val if start<=x<=(start+width)\n It is used to define the coefficient of the DAG-loss in NRI-MV.\n \"\"\"\n return max_val * max(min((x - start) / width, 1), 0)" }, { "identifier": "get_input_and_scoring_masks", "path": "src/causica/utils/training_objectives.py", "snippet": "def get_input_and_scoring_masks(\n mask: torch.Tensor, *, max_p_train_dropout: float, score_imputation: bool, score_reconstruction: bool\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Apply random dropout to an unprocessed mask, and calculate output positions to be included in the training loss.\n\n Args:\n mask: unprocessed mask indicating which variables are available for training. 1 indicates observed, 0 indicates\n unobserved.\n max_p_train_dropout: max proportion of columns to mask in each row.\n score_imputation: If true, the scoring mask has 1.0 for entries that are present in the data but masked in the\n input to the model.\n score_reconstruction: if true, the scoring mask has 1.0 for entries that are unmasked in the model input.\n\n Returns:\n A tuple (input_mask, scoring_mask), where input_mask is is the unprocessed mask to be applied before passing\n data to the model for reconstruction/imputation. 
scoring_mask (also, unprocessed mask) indicates which entries\n in the output should be included when calculating negative log-likelihood loss.\n \"\"\"\n\n if max_p_train_dropout > 0:\n p_missing = torch.rand(mask.shape[0], 1) * max_p_train_dropout\n input_mask = mask * torch.bernoulli(1.0 - p_missing.expand_as(mask)).to(mask.dtype).to(mask.device)\n else:\n input_mask = mask\n if score_reconstruction:\n if score_imputation:\n # Score both reconstruction and imputation\n scoring_mask = mask\n else:\n # Only score reconstruction\n scoring_mask = input_mask\n else:\n # Only score imputation\n scoring_mask = mask - input_mask\n return input_mask, scoring_mask" }, { "identifier": "kl_divergence", "path": "src/causica/utils/training_objectives.py", "snippet": "def kl_divergence(\n z1: Tuple[torch.Tensor, torch.Tensor],\n z2: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n):\n mean1, logvar1 = z1\n\n if z2 is not None:\n mean2, logvar2 = z2\n else:\n mean2 = torch.zeros_like(mean1)\n logvar2 = torch.zeros_like(logvar1)\n\n sigma1 = logvar1.exp().sqrt()\n sigma2 = logvar2.exp().sqrt()\n\n normal1 = tdist.Normal(mean1, sigma1)\n normal2 = tdist.Normal(mean2, sigma2)\n\n kld = tdist.kl_divergence(normal1, normal2)\n kld = kld.sum(axis=1)\n return kld" }, { "identifier": "negative_log_likelihood", "path": "src/causica/utils/training_objectives.py", "snippet": "def negative_log_likelihood(\n data: torch.Tensor,\n decoder_mean: torch.Tensor,\n decoder_logvar: torch.Tensor,\n variables: Variables,\n alpha: float,\n mask: Optional[torch.Tensor] = None,\n sum_type: Optional[str] = \"all\",\n) -> torch.Tensor:\n \"\"\"\n This function computes the negative log likelihood for all features and sums them.\n\n Args:\n data: Input data, shape (batch_size, input_count).\n decoder_mean: Output from the decoder, shape (batch_size, output_count)\n decoder_logvar: Output from the decoder, shape (batch_size, output_count)\n variables: List of all variables\n alpha: Categorical likelihood coefficient in NLL calculation.\n mask: Mask for input data, shape (batch_size, input_count). 1 is present, 0 is missing. Set to all 1's if None.\n sum_type: How to sum result. None will return the entire array, 'cols' will sum per variable,'all' will sum all\n elements.\n Returns:\n nll: Negative log likelihood summed as per `sum_type`. torch.Tensor of shape (batch_size, num_vars)\n if `sum_type=='all'`, shape (1, num_vars) if `sum_type=='cols'` or a scalar if `sum_type is None`. 
Note that if\n the data contains categorical variables, then num_vars <= num_features, where num_features is the number of\n features in the input data, since these are encoded using a one-hot encoding which spans multiple columns.\n \"\"\"\n variables = variables.subset(list(range(0, variables.num_unprocessed_non_aux_cols)))\n assert sum_type in [None, \"cols\", \"all\"]\n if data.ndim != 2: # type: ignore\n raise ValueError(\"Data should have dims (batch_size, input_count)\")\n if decoder_logvar.ndim != 2 or decoder_mean.ndim != 2: # type: ignore\n raise ValueError(\"decoder_logvar and decoder_mean should each have dims (batch_size, output_count)\")\n\n batch_size = data.shape[0]\n num_vars = variables.num_unprocessed_cols\n if mask is None:\n mask = torch.ones_like(data)\n\n # Call unprocessed columns vars, processed columns idxs\n vars_by_type, idxs_by_type = (\n variables.unprocessed_cols_by_type,\n variables.processed_cols_by_type,\n )\n\n if sum_type is None:\n nlls = torch.zeros(batch_size, num_vars, device=data.device, dtype=data.dtype)\n else:\n nlls = torch.zeros(1, num_vars, device=data.device, dtype=data.dtype)\n\n def flatten(lists):\n \"\"\"\n Flatten idxs for continuous and binary vars, since they will be of form [[1], [2], ...]\n \"\"\"\n return [i for sublist in lists for i in sublist]\n\n # If returning columnwise/total sum, we sum the NLL for each var. Note if returning the total sum, we don't sum over\n # all elements of each type here, to make it easier to collect everything in a single nlls tensor.\n feature_sum_type = \"cols\" if sum_type is not None else None\n if \"continuous\" in vars_by_type:\n continuous_vars, continuous_idxs = (\n vars_by_type[\"continuous\"],\n flatten(idxs_by_type[\"continuous\"]),\n )\n continuous_idxs_nlls = gaussian_negative_log_likelihood(\n data[:, continuous_idxs],\n decoder_mean[:, continuous_idxs],\n decoder_logvar[:, continuous_idxs],\n mask[:, continuous_idxs],\n sum_type=feature_sum_type,\n )\n # Need to account for VAEM's overwrite_processed_dim hack\n # (i.e. 
continuous variables possible being of dimension>1)\n if all(len(idxs) == 1 for idxs in idxs_by_type[\"continuous\"]):\n # Optimized operation when all continuous variables are of dimension 1\n nlls[:, continuous_vars] = continuous_idxs_nlls\n else:\n # Slower, correct operation if there is continuous variable of dimension > 1\n if len(continuous_idxs_nlls.shape) == 1:\n continuous_idxs_nlls = continuous_idxs_nlls.unsqueeze(dim=0)\n current_idx = 0\n for var, idxs in zip(continuous_vars, idxs_by_type[\"continuous\"]):\n var_idxs = range(current_idx, current_idx + len(idxs))\n nlls[:, var] = continuous_idxs_nlls[:, var_idxs].sum(dim=1)\n current_idx += len(idxs_by_type[\"continuous\"][-1])\n if \"binary\" in vars_by_type:\n binary_vars, binary_idxs = (\n vars_by_type[\"binary\"],\n flatten(idxs_by_type[\"binary\"]),\n )\n nlls[:, binary_vars] = bernoulli_negative_log_likelihood(\n data[:, binary_idxs],\n decoder_mean[:, binary_idxs],\n mask[:, binary_idxs],\n sum_type=feature_sum_type,\n )\n if \"categorical\" in vars_by_type:\n categorical_vars, categorical_idxs = (\n vars_by_type[\"categorical\"],\n idxs_by_type[\"categorical\"],\n )\n for var, idxs in zip(categorical_vars, categorical_idxs):\n # Have to compute NLL for each categorical variable separately due to different numbers of categories\n nlls[:, var] = alpha * categorical_negative_log_likelihood(\n data[:, idxs],\n decoder_mean[:, idxs],\n mask[:, idxs],\n sum_type=feature_sum_type,\n )\n # Now sum everything if returning total sum.\n if sum_type == \"all\":\n nlls = nlls.sum()\n\n return nlls" }, { "identifier": "PVAEBaseModel", "path": "src/causica/models/pvae_base_model.py", "snippet": "class PVAEBaseModel(TorchModel, IModelForObjective):\n \"\"\"\n Abstract model class.\n\n To instantiate this class, these functions need to be implemented:\n _train: Run the training loop for the model.\n _impute: Fill in any missing values for test data.\n _reconstruct: Reconstruct data by passing them through the VAE\n name: Name of model implementation.\n \"\"\"\n\n def __init__(self, model_id: str, variables: Variables, save_dir: str, device: torch.device) -> None:\n \"\"\"\n Args:\n model_id: Unique model ID for referencing this model instance.\n variables: Information about variables/features used by this model.\n save_dir: Location to save any information about this model, including training data.\n It will be created if it doesn't exist.\n device: Name of Torch device to create the model on. Valid options are 'cpu', 'gpu', or a device ID\n (e.g. 
0 or 1 on a two-GPU machine).\n \"\"\"\n super().__init__(model_id, variables, save_dir, device)\n self._alpha = 1.0 # The default value for the categorical likelihood coefficient.\n\n @staticmethod\n def _split_vamp_prior_config(training_config: Dict[str, Any]) -> Tuple[dict, dict]:\n # Split training config into (training_config, vamp_prior_config)\n training_config = training_config.copy()\n vamp_prior_config = {\"save_vamp_prior\": training_config.pop(\"save_vamp_prior\")}\n for k in [\"vamp_prior_reward_samples\", \"vamp_prior_inducing_points\"]:\n vamp_prior_config.update({k: training_config.pop(k, None)})\n return training_config, vamp_prior_config\n\n def _save_vamp_prior(\n self,\n processed_dataset: Union[Dataset, SparseDataset],\n save_vamp_prior: bool,\n vamp_prior_inducing_points: Optional[int] = None,\n vamp_prior_reward_samples: Optional[int] = None,\n ) -> None:\n if not save_vamp_prior:\n return\n assert vamp_prior_inducing_points is not None\n assert vamp_prior_reward_samples is not None\n train_data, train_mask = processed_dataset.train_data_and_mask\n vamp_prior_data = sample_inducing_points(train_data, train_mask, row_count=vamp_prior_inducing_points)\n vamp_prior_data = cast(Union[Tuple[np.ndarray, np.ndarray], Tuple[csr_matrix, csr_matrix]], vamp_prior_data)\n EDDIObjective.calc_and_save_vamp_prior_info_gain(self, vamp_prior_data, sample_count=vamp_prior_reward_samples)\n\n def run_train(\n self,\n dataset: Union[Dataset, SparseDataset],\n train_config_dict: Optional[Dict[str, Any]] = None,\n report_progress_callback: Optional[Callable[[str, int, int], None]] = None,\n ) -> None:\n\n \"\"\"\n Train the model.\n Training results will be saved.\n\n Args:\n dataset: Dataset object with data and masks in unprocessed form.\n train_config_dict (dictionary): Any other parameters needed by a specific concrete class. Of\n the form {arg_name: arg_value}. e.g. {\"learning_rate\": 1e-3, \"epochs\": 100}\n report_progress_callback: Function to report model progress for API.\n \"\"\"\n if train_config_dict is None:\n train_config_dict = {}\n train_config_dict, vamp_prior_config = self._split_vamp_prior_config(train_config_dict)\n processed_dataset = self.data_processor.process_dataset(dataset)\n self._train(\n dataset=processed_dataset,\n report_progress_callback=report_progress_callback,\n **train_config_dict,\n )\n self._save_vamp_prior(processed_dataset, **vamp_prior_config)\n\n @abstractmethod\n def _train(self, *args, **kwargs):\n pass\n\n def impute(self, data, mask, impute_config_dict=None, *, vamp_prior_data=None, average=True):\n if vamp_prior_data is None:\n return impute(self, data, mask, impute_config_dict=impute_config_dict, average=average)\n else:\n processed_vamp_data_array = self.data_processor.process_data_and_masks(*vamp_prior_data)\n # Keep processed VampPrior data on CPU until we sample inducing points, as this data can be large and is\n # not required for any CUDA computations.\n return impute(\n self,\n data,\n mask,\n impute_config_dict=impute_config_dict,\n average=average,\n vamp_prior_data=to_tensors(*processed_vamp_data_array, device=torch.device(\"cpu\")),\n )\n\n def impute_processed_batch(\n self: PVAEBaseModel,\n data: torch.Tensor,\n mask: torch.Tensor,\n *,\n sample_count: int,\n preserve_data: bool = True,\n vamp_prior_data: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Fill in unobserved variables in a minibatch of data using a trained model. 
Optionally, use a vamp prior to\n impute empty rows, and optionally replace imputed values with input values for observed features.\n\n Assumes data is a torch.Tensor and in processed form (i.e. variables will be in their squashed ranges,\n and categorical variables will be in one-hot form).\n\n Args:\n data (shape (batch_size, input_dim)): Data to be used to train the model, in processed form.\n mask (shape (batch_size, input_dim)): Data observation mask, where observed values are 1 and unobserved\n values are 0.\n sample_count: Number of imputation samples to generate.\n vamp_prior_data (Tuple of (torch tensor, torch tensor)): Data to be used to fill variables if using the VAMP\n prior method. Format: (data, mask). This defaults to None, in which case the VAMP Prior method will not\n be used.\n preserve_data (bool): Whether or not to impute data already present. Defaults to True, which keeps data\n present in input.\n\n Returns:\n imputations (torch.Tensor of shape (sample_count, batch_size, output_dim)): Input data with missing values\n filled in.\n \"\"\"\n if not isinstance(data, torch.Tensor) or not isinstance(mask, torch.Tensor):\n raise ValueError(\"data and mask should be tensors. To work on ndarrays, use impute\")\n assert data.shape == mask.shape\n assert data.shape[1] == self.input_dim\n batch_size, num_features = data.shape\n if self.variables.has_auxiliary:\n num_features = self.variables.num_processed_non_aux_cols\n\n imputations = torch.full((sample_count, batch_size, num_features), np.nan, device=self.device)\n\n # vamp_rows are rows where input is completely unobserved\n vamp_rows = torch.where(mask.sum(dim=1) == 0)[0]\n if vamp_prior_data is not None and vamp_rows.numel() > 0:\n imputed_from_vamp = self._impute_from_vamp_prior(sample_count * vamp_rows.numel(), vamp_prior_data)\n imputed_from_vamp = imputed_from_vamp.reshape(sample_count, vamp_rows.numel(), -1)\n imputations[:, vamp_rows, :] = imputed_from_vamp\n\n not_vamp_rows = torch.where(mask.sum(dim=1) != 0)[0]\n\n else:\n not_vamp_rows = torch.arange(batch_size)\n\n if len(not_vamp_rows) > 0:\n not_vamp_data = data[not_vamp_rows]\n not_vamp_mask = mask[not_vamp_rows]\n imputed_not_vamp_data = self._reconstruct_and_reshape(\n not_vamp_data, not_vamp_mask, sample_count=sample_count, **kwargs\n )\n imputations[:, not_vamp_rows, :] = imputed_not_vamp_data\n\n if preserve_data:\n imputations = restore_preserved_values(self.variables, data, imputations, mask)\n return imputations\n\n def get_model_pll(\n self: PVAEBaseModel,\n data: np.ndarray,\n feature_mask: np.ndarray,\n target_idx,\n sample_count: int = 50,\n ):\n \"\"\"\n Computes the predictive log-likelihood of the target-data given the feature_mask-masked data as input.\n\n Args:\n data (Numpy array of shape (batch_size, feature_count)): Data in unprocessed form to be used to\n compute the pll.\n feature_mask (Numpy array of shape (batch_size, feature_count)): Mask indicating conditioning\n variables for computing the predictive log-likelihood.\n target_idx (int): Column index of target variable for compute the likelihood of.\n sample_count (int): Number of Monte Carlo samples to use from the latent space. 
Defaults to 50.\n\n Returns:\n predictive_ll (float): Mean predictive log-likelihood (mean taken over batch dim in data).\n\n \"\"\"\n # Process input data\n (\n proc_feature_data_array,\n proc_feature_mask_array,\n ) = self.data_processor.process_data_and_masks(data, feature_mask)\n proc_feature_data, proc_feature_mask = to_tensors(\n proc_feature_data_array, proc_feature_mask_array, device=self.device\n )\n\n # Create target_mask from target_index\n target_mask = np.zeros_like(data, dtype=bool)\n target_mask[:, target_idx] = 1\n\n # Process target data\n (\n proc_target_data_array,\n proc_target_mask_array,\n ) = self.data_processor.process_data_and_masks(data, target_mask)\n proc_target_data, proc_target_mask = to_tensors(\n proc_target_data_array, proc_target_mask_array, device=self.device\n )\n\n # Expand target data and mask to be shape (sample_count, batch_size, feature_count)\n proc_target_data = proc_target_data.expand(sample_count, *proc_target_data.shape)\n proc_target_mask = proc_target_mask.expand(sample_count, *proc_target_mask.shape)\n\n # Compute PVAE outputs given input features (parameters of the Gaussian mixture)\n (dec_mean, dec_logvar), _, _ = self.reconstruct(proc_feature_data, proc_feature_mask, count=sample_count)\n\n # Compute Gaussian negative log-likelihood per sample in sample_count\n gnll = gaussian_negative_log_likelihood(\n proc_target_data, dec_mean, dec_logvar, mask=proc_target_mask, sum_type=None\n )\n gnll = gnll[:, :, target_idx]\n predictive_ll = -gnll\n predictive_ll = torch.logsumexp(predictive_ll, dim=0) - np.log(sample_count)\n predictive_ll = predictive_ll.mean()\n\n return predictive_ll\n\n def get_marginal_log_likelihood(\n self,\n impute_config: Dict[str, int],\n data: Union[np.ndarray, csr_matrix],\n observed_mask: Optional[Union[np.ndarray, csr_matrix]] = None,\n target_mask: Optional[Union[np.ndarray, csr_matrix]] = None,\n evaluate_imputation: Optional[bool] = False,\n num_importance_samples: int = 5000,\n **kwargs,\n ) -> float:\n \"\"\"\n Estimate marginal log-likelihood of the data using importance sampling:\n - Imputation MLL -> imputed data given the observed data log p(x_u|x_o) if evaluate_imputation is True\n - Reconstruction MLL -> all data log p(x) otherwise\n\n Args:\n impute_config: Dictionary containing options for inference.\n data: Data in unprocessed form to be used with shape (num_rows, input_dim).\n mask: If not None, mask indicating observed variables with shape (num_rows, input_dim). 1 is observed,\n 0 is un-observed. 
If None everything is marked as observed.\n target_mask: Values masked during imputation to use as prediction targets, where 1 is a target, 0 is not.\n If None, nothing is marked as an imputation target.\n evaluate_imputation: Whether to estimate Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x).\n num_importance_samples: The number of importance samples to be taken.\n **kwargs: Extra keyword arguments required by reconstruct.\n Returns:\n marginal_log_likelihood: The estimated marginal log likelihood averaged across data points.\n \"\"\"\n # TODO(17895): Add Generation MLL option to the marginal log-likelihood metric.\n\n batch_size = impute_config[\"batch_size\"]\n\n # Assumed to only work on dense arrays for now\n if issparse(data):\n data = cast(csr_matrix, data)\n data = data.toarray()\n if issparse(observed_mask):\n observed_mask = cast(csr_matrix, observed_mask)\n observed_mask = observed_mask.toarray()\n if issparse(target_mask):\n target_mask = cast(csr_matrix, target_mask)\n target_mask = target_mask.toarray()\n if observed_mask is None:\n observed_mask = np.ones_like(data, dtype=bool)\n if target_mask is None:\n assert not evaluate_imputation\n target_mask = np.zeros_like(data, dtype=bool)\n assert data.shape == observed_mask.shape\n assert data.shape == target_mask.shape\n\n num_rows, _ = data.shape\n\n # TODO(17896): Add processing and batching of extra data objects\n processed_data, processed_obs_mask, processed_target_mask = self.data_processor.process_data_and_masks(\n data, observed_mask, target_mask\n )\n marginal_log_likelihood = np.empty((num_rows,), dtype=processed_data.dtype)\n\n with torch.no_grad():\n dataloader = create_dataloader(\n processed_data,\n processed_obs_mask,\n processed_target_mask,\n batch_size=batch_size,\n sample_randomly=False,\n )\n\n for idx, (processed_data_batch, processed_obs_mask_batch, processed_target_mask_batch) in enumerate(\n tqdm(dataloader)\n ):\n processed_data_batch = processed_data_batch.to(self.device)\n processed_obs_mask_batch = processed_obs_mask_batch.to(self.device)\n processed_target_mask_batch = processed_target_mask_batch.to(self.device)\n\n log_importance_weights = self._get_log_importance_weights(\n processed_data_batch,\n processed_obs_mask_batch,\n processed_target_mask_batch,\n evaluate_imputation=cast(bool, evaluate_imputation),\n num_importance_samples=num_importance_samples,\n **kwargs,\n ) # Shape (num_importance_samples, batch_size)\n average_factor = torch.log(torch.tensor(num_importance_samples, dtype=torch.float))\n marginal_log_likelihood_batch = (\n torch.logsumexp(log_importance_weights, dim=0) - average_factor\n ) # Shape (batch_size,)\n\n idx_start = idx * batch_size\n idx_end = min((idx + 1) * batch_size, num_rows)\n marginal_log_likelihood[idx_start:idx_end] = marginal_log_likelihood_batch.cpu().numpy()\n\n return marginal_log_likelihood.sum().item() / num_rows\n\n @abstractmethod\n def reconstruct(\n self, data: torch.Tensor, mask: Optional[torch.Tensor], sample: bool = True, count: int = 1, **kwargs: Any\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor, Tuple[torch.Tensor, torch.Tensor],]:\n \"\"\"\n Reconstruct data by passing them through the VAE.\n Note that mask=None should always be used in subclasses that don's support missing values.\n\n Args:\n data: Input data with shape (batch_size, input_dim).\n mask: If not None, mask indicating observed variables with shape (batch_size, input_dim). 
1 is observed,\n 0 is un-observed.\n sample: If True, samples the latent variables, otherwise uses the mean.\n count: Number of samples to reconstruct.\n **kwargs: Extra keyword arguments required.\n\n Returns:\n (decoder_mean, decoder_logvar): Reconstucted variables, output from the decoder. Both are shape (count, batch_size, output_dim). Count dim is removed if 1.\n samples: Latent variable used to create reconstruction (input to the decoder). Shape (count, batch_size, latent_dim). Count dim is removed if 1.\n (encoder_mean, encoder_logvar): Output of the encoder. Both are shape (batch_size, latent_dim)\n \"\"\"\n raise NotImplementedError()\n\n def validate_loss_config(self, loss_config: LossConfig) -> None:\n assert loss_config.score_imputation is not None and loss_config.score_reconstruction is not None\n assert loss_config.score_reconstruction or loss_config.score_imputation\n assert loss_config.max_p_train_dropout is not None\n\n def _impute_from_vamp_prior(\n self, num_samples: int, vamp_prior_data: Tuple[torch.Tensor, torch.Tensor]\n ) -> torch.Tensor:\n vp_data, vp_mask = vamp_prior_data\n assert vp_data.shape == vp_mask.shape\n assert vp_data.shape[1] == self.variables.num_processed_cols\n # Sample inducing points for all rows, shape (sample_count * num_vamp_rows, input_dim)\n inducing_data, inducing_mask = sample_inducing_points(vp_data, vp_mask, num_samples)\n # Only move to GPU once we have sampled the inducing points as these tensors are much smaller.\n inducing_data, inducing_mask = (\n inducing_data.to(self.device),\n inducing_mask.to(self.device),\n )\n # Shape (1, num_samples, output_dim)\n return self._reconstruct_and_reshape(inducing_data, inducing_mask, sample_count=1)\n\n def _reconstruct_and_reshape(\n self, data: torch.Tensor, mask: Optional[torch.Tensor], sample_count: int, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Make sample_count imputations of missing data for given data and mask.\n\n Args:\n data: partially observed data with shape (batch_size, input_dim).\n mask: mask indicating observed variables with shape (batch_size, input_dim). 1 is observed, 0 is\n un-observed.\n If None, will be set to all 1's.\n sample_count: Number of samples to take.\n\n Returns:\n imputations: PyTorch Tensor with shape: (sample_count, batch_size, input_dim)\n \"\"\"\n if mask is None:\n mask = torch.ones_like(data)\n assert data.dim() == 2\n assert mask.shape == data.shape\n assert data.shape[1] == self.variables.num_processed_cols\n (imputations, _), _, _ = self.reconstruct(data=data, mask=mask, sample=True, count=sample_count, **kwargs)\n if self.variables.has_auxiliary:\n data = data[:, 0 : self.variables.num_processed_non_aux_cols]\n return imputations.reshape(sample_count, *data.shape)\n\n def _get_log_importance_weights(\n self,\n data: torch.Tensor,\n observed_mask: torch.Tensor,\n target_mask: torch.Tensor,\n evaluate_imputation: bool,\n num_importance_samples: int,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Generate a set of log importance weights/samples to estimate marginal log-likelihood.\n Collect samples from z ~ q(z|x) to estimate:\n - Imputation MLL -> return log [p(x_u|z) q(z|x_o) / q(z|x)] if evaluate_imputation is True\n - Reconstruction MLL -> return log [p(x|z) p(z) / q(z|x)] otherwise\n\n This function assumes that latent prior distribution is standard Normal p(z) ~ N(0, 1).\n\n Args:\n data: Data to be used with shape (batch_size, input_dim).\n mask: Mask indicating observed values in data with shape (batch_size, input_dim). 
1 is observed,\n 0 is un-observed.\n target_mask: Values marked as prediction targets during imputation, where 1 is a target and 0 is not.\n evaluate_imputation: Whether to collect samples for Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x).\n num_importance_samples: The number of importance samples to be taken.\n **kwargs: Extra keyword arguments required by reconstruct.\n\n Returns:\n log_importance_weights: Log of importance weights with shape (num_importance_samples, batch_size).\n \"\"\"\n assert observed_mask is not None\n assert target_mask is not None\n assert data.shape == observed_mask.shape\n assert data.shape == target_mask.shape\n\n data_non_aux = data[:, 0 : self.variables.num_processed_non_aux_cols]\n num_non_aux_vars = self.variables.num_unprocessed_non_aux_cols\n batch_size, _ = data.shape\n\n # Collect samples\n (dec_mean, dec_logvar), latent_samples, (enc_mean, enc_logvar) = self.reconstruct(\n data=data, mask=observed_mask, sample=True, count=num_importance_samples, **kwargs\n )\n latent_samples = latent_samples.reshape(num_importance_samples, batch_size, -1)\n\n # Calculate nll i.e. -log[p(x_u|z)] or -log[p(x|z)]\n if evaluate_imputation:\n mask_nll = target_mask\n else:\n mask_nll = observed_mask\n\n nll = negative_log_likelihood(\n data=data_non_aux.repeat(num_importance_samples, 1),\n decoder_mean=dec_mean.reshape(\n num_importance_samples * batch_size, self.variables.num_processed_non_aux_cols\n ),\n decoder_logvar=dec_logvar.reshape(\n num_importance_samples * batch_size, self.variables.num_processed_non_aux_cols\n ),\n variables=self.variables,\n alpha=self._alpha,\n mask=mask_nll.repeat(num_importance_samples, 1),\n sum_type=None,\n ) # Shape (num_importance_samples * batch_size, num_non_aux_vars)\n nll = nll.reshape(\n num_importance_samples, batch_size, num_non_aux_vars\n ) # Shape (num_importance_samples, batch_size, num_non_aux_vars\n nll = nll.sum(dim=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log latent variational log[q(z|x)]\n log_latent_variational = (-1) * gaussian_negative_log_likelihood(\n targets=latent_samples, mean=enc_mean, logvar=enc_logvar, mask=None, sum_type=None\n ) # Shape (num_importance_samples, batch_size, latent_dim)\n log_latent_variational = log_latent_variational.sum(axis=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log latent prior log[q(z|x_o)] or log[p(z)]\n if evaluate_imputation:\n (_, _), _, (latent_prior_mean, latent_prior_logvar) = self.reconstruct(\n data=data, mask=observed_mask, sample=False, count=1, **kwargs\n )\n else:\n latent_prior_mean = torch.tensor(0.0)\n latent_prior_logvar = torch.log(torch.tensor(1.0))\n\n log_latent_prior = (-1) * gaussian_negative_log_likelihood(\n targets=latent_samples,\n mean=latent_prior_mean,\n logvar=latent_prior_logvar,\n mask=None,\n sum_type=None,\n ) # Shape (num_importance_samples, batch_size, latent_dim)\n log_latent_prior = log_latent_prior.sum(axis=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log importance weights\n log_importance_weights = (\n (-1) * nll + log_latent_prior - log_latent_variational\n ) # Shape (num_importance_samples, batch_size)\n return log_importance_weights" } ]
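The context snippet above estimates the marginal log-likelihood with importance samples drawn from the encoder: log p(x) is approximated by logsumexp_k(log w_k) - log K, where log w_k = log p(x|z_k) + log p(z_k) - log q(z_k|x). The following is a minimal, self-contained sketch of that estimator for a factorised-Gaussian VAE; `encoder` and `decoder` are placeholder callables and all tensor names are illustrative, not taken from the repository.

import math
import torch

LOG_2PI = math.log(2 * math.pi)

def gaussian_log_prob(x, mean, logvar):
    # log N(x; mean, exp(logvar)), summed over the last (feature) dimension
    return (-0.5 * (logvar + (x - mean) ** 2 / logvar.exp() + LOG_2PI)).sum(-1)

def iwae_marginal_log_likelihood(x, encoder, decoder, num_importance_samples=50):
    """Per-row estimate of log p(x), shape (batch_size,)."""
    enc_mean, enc_logvar = encoder(x)                        # (batch, latent_dim)
    std = (0.5 * enc_logvar).exp()
    eps = torch.randn(num_importance_samples, *enc_mean.shape)
    z = enc_mean + std * eps                                 # (K, batch, latent_dim)

    dec_mean, dec_logvar = decoder(z)                        # (K, batch, input_dim)
    log_px_given_z = gaussian_log_prob(x, dec_mean, dec_logvar)           # (K, batch)
    log_pz = gaussian_log_prob(z, torch.zeros_like(z), torch.zeros_like(z))
    log_qz_given_x = gaussian_log_prob(z, enc_mean, enc_logvar)

    log_w = log_px_given_z + log_pz - log_qz_given_x         # (K, batch)
    return torch.logsumexp(log_w, dim=0) - math.log(num_importance_samples)

For the imputation variant described in the snippet, p(z) is replaced by q(z|x_o) and the reconstruction term is restricted to the target mask, but the logsumexp averaging step is identical.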
import json import math import os import warnings import numpy as np # type: ignore import torch import torch.distributions as tdist import torch.nn.functional as F from typing import Callable, Dict, List, Optional, Tuple from torch import nn from torch.utils.data import DataLoader, TensorDataset from ..datasets.dataset import Dataset from ..datasets.variables import Variables from ..models.imodel import IModelForCausalInference from ..utils.helper_functions import to_tensors from ..utils.io_utils import save_json from ..utils.nri_utils import compute_dag_loss, get_feature_indices_per_node, kl_categorical, piecewise_linear from ..utils.training_objectives import get_input_and_scoring_masks, kl_divergence, negative_log_likelihood from .pvae_base_model import PVAEBaseModel
18,895
lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. 
if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE) input_mask, scoring_mask = get_input_and_scoring_masks( mask_train_batch, max_p_train_dropout=max_p_train_dropout, score_imputation=True, score_reconstruction=True, ) # Apply the GNN-based VAE (x_reconstructed, _), _, encoder_output = self.reconstruct( x, input_mask, only_forward=only_forward, count=sample_count ) # Loss: lambda_nll * NLL + # lambda_kl_z * KL(q(z)||p(z)) + # lambda_kl_A * KL(q(A)||p(A)) + # piecewise_linear_temperature * lambda_dagloss * dag_loss # NLL dec_logvar = torch.full_like(x_reconstructed, math.log(output_variance)) categorical_lik_coeff = 1.0 nll = negative_log_likelihood( x, x_reconstructed, dec_logvar, self.variables, categorical_lik_coeff, scoring_mask ) nll_term = lambda_nll * nll # KL(q(z)||p(z)) term
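The comment in the training code above says the DAG-loss coefficient stays at zero for the first 'epochs_without_dagloss' epochs and then ramps up linearly over 10% of the total number of epochs until it reaches 1. The repository imports a `piecewise_linear` helper for this, whose exact signature is not shown here, so the function below is only an illustrative re-statement of the schedule described in that comment.

def dag_loss_coefficient(epoch: int, total_epochs: int, epochs_without_dagloss: int = 5) -> float:
    """Illustrative lambda schedule: 0, then a linear ramp over 10% of the epochs, then 1."""
    ramp_epochs = max(1, int(0.1 * total_epochs))
    if epoch < epochs_without_dagloss:
        return 0.0
    return min(1.0, (epoch - epochs_without_dagloss) / ramp_epochs)

# With 100 epochs: 0.0 until epoch 5, 0.5 at epoch 10, 1.0 from epoch 15 onwards.
assert dag_loss_coefficient(4, 100) == 0.0
assert dag_loss_coefficient(10, 100) == 0.5
assert dag_loss_coefficient(20, 100) == 1.0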
# This is required in python 3 to allow return types of the same class. from __future__ import annotations class VISL(PVAEBaseModel, IModelForCausalInference): """ Subclass of `models.pvae_base_model.PVAEBaseModel` representing the algorithm VISL (missing value imputation with causal discovery). Requires file <data_dir>/<dataset_name>/adj_matrix.csv to evaluate causal discovery against ground truth. """ def __init__( self, model_id: str, variables: Variables, save_dir: str, device: torch.device, gnn_iters: int, shared_init_and_final_mappings: bool, embedding_dim: int, init_prob: float, simpler: str = None, **_, ): """ Args: model_id: Unique model ID for referencing this model instance. variables: Information about variables/features used by this model. save_dir: Location to save any information about this model, including training data. device: Device to load model to. gnn_iters: Number of message passing iterations for the GNN. shared_init_and_final_mappings: Whether all the nodes should use the same MLPs for the initial and final mappings. embedding_dim: Dimensionality of the nodes embedding. init_prob: Initial probability of having edge. simpler: Choose what MLP should be simpler (options are 'forward', 'backward', or None). Specifically, 'simpler' means to divide by 10 the dimensionality of the hidden layer of the corresponding MLP (with a minimum of 10 units). """ super().__init__(model_id, variables, save_dir, device) # Define some useful attributes feature_indices_per_node, ordered_nodes = get_feature_indices_per_node(variables) with open(os.path.join(self.save_dir, "ordered_nodes.json"), "w", encoding="utf-8") as f: json.dump(ordered_nodes, f, indent=4) self.num_nodes = len(feature_indices_per_node) self.num_edges = self.num_nodes * (self.num_nodes - 1) self.input_dim = variables.num_processed_cols # Define and initialize Z_edges # The learnable parameter is Z_edges_logits. Z_edges is F.softmax(Z_edges_logits, dim=1). self.Z_edges_logits = torch.nn.Parameter( torch.stack( [ torch.full([self.num_edges], math.log(1 - init_prob)), torch.full([self.num_edges], math.log(init_prob)), ], dim=1, ).to(device) ) # Define the GNN-based VAE self.gnn_vae = GNN_based_VAE( embedding_dim=embedding_dim, skip_first=True, device=device, n_iters=gnn_iters, num_nodes=self.num_nodes, shared_init_and_final_mappings=shared_init_and_final_mappings, simpler=simpler, feature_indices_per_node=feature_indices_per_node, ) # Create rel_rec and rel_send, which codify the receiving and sending node for each edge # Shape of rel_rec and rel_send: [num_edges, num_nodes] # The second dimension is a one-hot encoding of the receiver or sender node off_diag = np.ones([self.num_nodes, self.num_nodes]) - np.eye(self.num_nodes) rel_rec = F.one_hot(torch.tensor(np.where(off_diag)[0], dtype=torch.long)) rel_send = F.one_hot(torch.tensor(np.where(off_diag)[1], dtype=torch.long)) self.rel_rec = rel_rec.type(torch.float).to(device) self.rel_send = rel_send.type(torch.float).to(device) # Define the prior over edge types (favors sparse graphs) self.log_prior = torch.log( torch.tensor([0.95, 0.05], device=device) ) # The no-edge type is the first one (recall the skip_first argument of GNN_based_VAE __init__) # Save type of variables. Used in reconstruct method for # 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. 
use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). 
if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE) input_mask, scoring_mask = get_input_and_scoring_masks( mask_train_batch, max_p_train_dropout=max_p_train_dropout, score_imputation=True, score_reconstruction=True, ) # Apply the GNN-based VAE (x_reconstructed, _), _, encoder_output = self.reconstruct( x, input_mask, only_forward=only_forward, count=sample_count ) # Loss: lambda_nll * NLL + # lambda_kl_z * KL(q(z)||p(z)) + # lambda_kl_A * KL(q(A)||p(A)) + # piecewise_linear_temperature * lambda_dagloss * dag_loss # NLL dec_logvar = torch.full_like(x_reconstructed, math.log(output_variance)) categorical_lik_coeff = 1.0 nll = negative_log_likelihood( x, x_reconstructed, dec_logvar, self.variables, categorical_lik_coeff, scoring_mask ) nll_term = lambda_nll * nll # KL(q(z)||p(z)) term
kl_z_term = lambda_kl_z * kl_divergence(encoder_output).sum()
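The next_line above adds the lambda_kl_z * KL(q(z|x) || p(z)) term of the ELBO. For a diagonal-Gaussian encoder and a standard-normal prior this KL has the closed form 0.5 * sum(exp(logvar) + mean^2 - 1 - logvar). The sketch below assumes `encoder_output` carries (mean, logvar) tensors; the actual `kl_divergence` helper imported above may expose a different interface.

import torch

def kl_to_standard_normal(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # KL( N(mean, diag(exp(logvar))) || N(0, I) ), summed over every element
    return 0.5 * (logvar.exp() + mean ** 2 - 1.0 - logvar).sum()

mean, logvar = torch.zeros(4, 8), torch.zeros(4, 8)
assert kl_to_standard_normal(mean, logvar).item() == 0.0  # q == p gives KL = 0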
10
2023-11-21 12:55:08+00:00
24k
Yifei-Y/Openset-RCNN
openset_rcnn/evaluation/os_coco_evaluation.py
[ { "identifier": "GRASPNET_KNOWN_IDS", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_IDS = [graspnet_known_name_id_dic[name_cat] for name_cat in GRASPNET_KNOWN_CATEGORIES]" }, { "identifier": "GRASPNET_KNOWN_CATEGORIES", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_CATEGORIES = [\n \"cracker_box\", \"tomato_soup_can\", \"banana\", \"mug\", \"power_drill\", \"scissors\", \"strawberry\",\n \"peach\", \"plum\", \"knife\", \"flat_screwdriver\", \"racquetball\", \"b_cups\", \"d_toy_airplane\",\n \"f_toy_airplane\", \"i_toy_airplane\", \"j_toy_airplane\", \"dabao_sod\", \"darlie_toothpaste\",\n \"camel\", \"large_elephant\", \"rhinocero\", \"darlie_box\", \"black_mouse\", \"dabao_facewash\",\n \"pantene\", \"head_shoulders_supreme\", \"head_shoulders_care\"\n]" }, { "identifier": "OpensetCOCOEval", "path": "openset_rcnn/evaluation/os_cocoeval.py", "snippet": "class OpensetCOCOEval(COCOeval):\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n k_gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n k_dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(k_gts, self.cocoGt)\n _toMask(unk_gts, self.cocoGt)\n _toMask(k_dts, self.cocoDt)\n _toMask(unk_dts, self.cocoDt)\n # set ignore flag\n for kgt in k_gts:\n kgt['ignore'] = kgt['ignore'] if 'ignore' in kgt else 0\n kgt['ignore'] = 'iscrowd' in kgt and kgt['iscrowd']\n for ugt in unk_gts:\n ugt['ignore'] = ugt['ignore'] if 'ignore' in ugt else 0\n ugt['ignore'] = 'iscrowd' in ugt and ugt['iscrowd']\n self._k_gts = defaultdict(list) # gt for evaluation\n self._ok_gts = defaultdict(list)\n self._unk_gts = defaultdict(list)\n self._k_dts = defaultdict(list) # dt for evaluation\n self._unk_dts = defaultdict(list)\n for kgt in k_gts:\n self._k_gts[kgt['image_id'], kgt['category_id']].append(kgt)\n for cId in p.catIds:\n for kgt in k_gts:\n if kgt['category_id'] != cId:\n self._ok_gts[kgt['image_id'], cId].append(kgt)\n for ugt in unk_gts:\n self._unk_gts[ugt['image_id']].append(ugt)\n for kdt in k_dts:\n self._k_dts[kdt['image_id'], kdt['category_id']].append(kdt)\n for udt in unk_dts:\n self._unk_dts[udt['image_id']].append(udt)\n self.evalImgs_kdt = defaultdict(list) # per-image per-category evaluation results\n self.evalImgs_unkdt = defaultdict(list)\n self.eval_kdt = {} # accumulated evaluation results\n self.eval_unkdt = {}\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n self.ious_kdt_kgt = {(imgId, catId): self.computeIoU_kdt_kgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_okgt = {(imgId, catId): self.computeIoU_kdt_okgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_unkgt = {(imgId, catId): self.computeIoU_kdt_unkgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_unkdt_kgt = {(imgId): self.computeIoU_unkdt_kgt(imgId) for imgId in p.imgIds}\n self.ious_unkdt_unkgt = {(imgId): self.computeIoU_unkdt_unkgt(imgId) for imgId in p.imgIds}\n \n maxDet = p.maxDets[-1]\n self.evalImgs_kdt = [self.evaluateImg_kdt(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self.evalImgs_unkdt = [self.evaluateImg_unkdt(imgId, areaRng, maxDet)\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n \n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU_kdt_kgt(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._k_gts[imgId,catId]\n dt = self._k_dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._k_dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_okgt(self, imgId, catId):\n p = self.params\n gt = self._ok_gts[imgId, catId]\n dt = self._k_dts[imgId,catId]\n \n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_unkgt(self, imgId, catId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._k_dts[imgId,catId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n 
# compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_kgt(self, imgId):\n p = self.params\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_unkgt(self, imgId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def evaluateImg_kdt(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n\n k_gt = self._k_gts[imgId,catId]\n ok_gt = self._ok_gts[imgId,catId]\n unk_gt = self._unk_gts[imgId]\n k_dt = self._k_dts[imgId,catId]\n\n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for okg in ok_gt:\n if okg['ignore'] or (okg['area']<aRng[0] or okg['area']>aRng[1]):\n okg['_ignore'] = 1\n else:\n okg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n k_gtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in k_gtind]\n ok_gtind = np.argsort([okg['_ignore'] for okg in ok_gt], kind='mergesort')\n ok_gt = [ok_gt[i] for i in ok_gtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n k_dtind = np.argsort([-kd['score'] for kd in k_dt], kind='mergesort')\n k_dt = [k_dt[i] for i in k_dtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n ok_iscrowd = [int(o['iscrowd']) for o in ok_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n # load computed ious\n ious_kgt = (\n self.ious_kdt_kgt[imgId, catId][:, k_gtind] \\\n if len(self.ious_kdt_kgt[imgId, catId]) > 0 else self.ious_kdt_kgt[imgId, catId]\n )\n ious_okgt = (\n self.ious_kdt_okgt[imgId, catId][:, ok_gtind] \\\n if len(self.ious_kdt_okgt[imgId, catId]) > 0 else self.ious_kdt_okgt[imgId, catId]\n )\n ious_unkgt = (\n self.ious_kdt_unkgt[imgId, catId][:, unk_gtind] \\\n if len(self.ious_kdt_unkgt[imgId, catId]) > 0 else self.ious_kdt_unkgt[imgId, catId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n OKG = len(ok_gt)\n UG = 
len(unk_gt)\n KD = len(k_dt)\n kgtm = np.zeros((T,KG))\n okgtm = np.zeros((T,OKG))\n unkgtm = np.zeros((T,UG))\n kdtm_kgt = np.zeros((T,KD))\n kdtm_okgt = np.zeros((T,KD))\n kdtm_unkgt = np.zeros((T,KD))\n kgtIg = np.array([kg['_ignore'] for kg in k_gt])\n okgtIg = np.array([okg['_ignore'] for okg in ok_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n kdtIg_kgt = np.zeros((T,KD))\n kdtIg_okgt = np.zeros((T,KD))\n kdtIg_unkgt = np.zeros((T,KD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[kdind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[kdind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_kgt[tind,kdind] = kgtIg[m]\n kdtm_kgt[tind,kdind] = k_gt[m]['id']\n kgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_kgt = np.logical_or(kdtIg_kgt, np.logical_and(kdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_okgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for okgind, okg in enumerate(ok_gt):\n # if this gt already matched, and not a crowd, continue\n if okgtm[tind,okgind]>0 and not ok_iscrowd[okgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and okgtIg[m]==0 and okgtIg[okgind]==1:\n break\n # continue to next gt unless better match made\n if ious_okgt[kdind,okgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_okgt[kdind,okgind]\n m=okgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_okgt[tind,kdind] = okgtIg[m]\n kdtm_okgt[tind,kdind] = ok_gt[m]['id']\n okgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_okgt = np.logical_or(kdtIg_okgt, np.logical_and(kdtm_okgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[kdind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[kdind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_unkgt[tind,kdind] = unkgtIg[m]\n kdtm_unkgt[tind,kdind] = unk_gt[m]['id']\n unkgtm[tind,m] = kd['id']\n # set unmatched detections outside of 
area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_unkgt = np.logical_or(kdtIg_unkgt, np.logical_and(kdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'kdtIds': [kd['id'] for kd in k_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'okgtIds': [okg['id'] for okg in ok_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_kdt_kgt': kdtm_kgt,\n 'Matches_kdt_okgt': kdtm_okgt,\n 'Matches_kdt_unkgt': kdtm_unkgt,\n 'kgtMatches': kgtm,\n 'okgtMatches': okgtm,\n 'unkgtMatches': unkgtm,\n 'kdtScores': [kd['score'] for kd in k_dt],\n 'kgtIgnore': kgtIg,\n 'okgtIgnore': okgtIg,\n 'unkgtIgnore': unkgtIg,\n 'kdtIgnore_kgt': kdtIg_kgt,\n 'kdtIgnore_okgt': kdtIg_okgt,\n 'kdtIgnore_unkgt': kdtIg_unkgt,\n }\n \n def evaluateImg_unkdt(self, imgId, aRng, maxDet):\n '''\n '''\n p = self.params\n k_gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n unk_gt = self._unk_gts[imgId]\n unk_dt = self._unk_dts[imgId]\n if len(unk_gt) == 0 and len(unk_dt) == 0:\n return None\n \n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n \n # sort dt highest score first, sort gt ignore last\n kgtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in kgtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n udtind = np.argsort([-ud['score'] for ud in unk_dt], kind='mergesort')\n unk_dt = [unk_dt[i] for i in udtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n\n # load computed ious\n ious_kgt = (\n self.ious_unkdt_kgt[imgId][:, kgtind] \\\n if len(self.ious_unkdt_kgt[imgId]) > 0 else self.ious_unkdt_kgt[imgId]\n )\n ious_unkgt = (\n self.ious_unkdt_unkgt[imgId][:, unk_gtind] \\\n if len(self.ious_unkdt_unkgt[imgId]) > 0 else self.ious_unkdt_unkgt[imgId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n UG = len(unk_gt)\n UD = len(unk_dt)\n kgtm = np.zeros((T,KG))\n unkgtm = np.zeros((T,UG))\n unkdtm_kgt = np.zeros((T,UD))\n unkdtm_unkgt = np.zeros((T,UD))\n kgtIg = np.array([g['_ignore'] for g in k_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n unkdtIg_kgt = np.zeros((T,UD))\n unkdtIg_unkgt = np.zeros((T,UD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[udind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[udind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_kgt[tind,udind] = kgtIg[m]\n unkdtm_kgt[tind,udind] = k_gt[m]['id']\n kgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = 
np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_kgt = np.logical_or(unkdtIg_kgt, np.logical_and(unkdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[udind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[udind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_unkgt[tind,udind] = unkgtIg[m]\n unkdtm_unkgt[tind,udind] = unk_gt[m]['id']\n unkgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_unkgt = np.logical_or(unkdtIg_unkgt, np.logical_and(unkdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'unkdtIds': [ud['id'] for ud in unk_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_unkdt_kgt': unkdtm_kgt,\n 'Matches_unkdt_unkgt': unkdtm_unkgt,\n 'kgtMatches': kgtm,\n 'unkgtMatches': unkgtm,\n 'unkdtScores': [ud['score'] for ud in unk_dt],\n 'kgtIgnore': kgtIg,\n 'unkgtIgnore': unkgtIg,\n 'unkdtIgnore_kgt': unkdtIg_kgt,\n 'unkdtIgnore_unkgt': unkdtIg_unkgt,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results of known detections...')\n tic = time.time()\n if not self.evalImgs_kdt or not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n ok_det_as_known = np.zeros((T,K,A,M))\n unk_det_as_known = np.zeros((T,K,A,M))\n fp_os = np.zeros((T,R,K,A,M))\n tp_plus_fp_cs = np.zeros((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = 
[self.evalImgs_kdt[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['kdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n \n dtScoresSortedExpand = np.expand_dims(dtScoresSorted, 0)\n dtScoresSortedExpand = np.repeat(dtScoresSortedExpand, T, 0)\n kdtm_kgt = np.concatenate([e['Matches_kdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_okgt = np.concatenate([e['Matches_kdt_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_unkgt = np.concatenate([e['Matches_kdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_kgt = np.concatenate([e['kdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_okgt = np.concatenate([e['kdtIgnore_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_unkgt = np.concatenate([e['kdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n npig = np.count_nonzero(kgtIg==0)\n if npig == 0:\n continue\n tps = np.logical_and(kdtm_kgt, np.logical_not(kdtIg_kgt) )\n fps = np.logical_and(np.logical_not(kdtm_kgt), np.logical_not(kdtIg_kgt) )\n okfps = np.logical_and(kdtm_okgt, np.logical_not(kdtIg_okgt))\n ufps = np.logical_and(kdtm_unkgt, np.logical_not(kdtIg_unkgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n tp_fp_sum = tp_sum + fp_sum\n okfp_sum = np.sum(okfps, axis=1).astype(dtype=np.float)\n ufp_sum = np.cumsum(ufps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, tp_fp, ufp) in enumerate(zip(tp_sum, fp_sum, tp_fp_sum, ufp_sum)):\n if len(ufp):\n unk_det_as_known[t,k,a,m] = ufp[-1]\n\n ok_det_as_known[t,k,a,m] = okfp_sum[t]\n\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n tf = np.zeros((R,))\n fo = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n l = len(tp_fp)\n if l:\n for ri, pi in enumerate(inds):\n if pi == l:\n pi -= 1\n tf[ri] = tp_fp[pi]\n fo[ri] = ufp[pi]\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n tp_plus_fp_cs[t,:,k,a,m] = np.array(tf)\n fp_os[t,:,k,a,m] = np.array(fo)\n self.eval_kdt = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'ok_det_as_known': ok_det_as_known,\n 'unk_det_as_known': unk_det_as_known,\n 'tp_plus_fp_cs': tp_plus_fp_cs,\n 'fp_os': fp_os\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n print('Accumulating evaluation results of unknown detections...')\n tic = time.time()\n if not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n \n precision = -np.ones((T,R,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,A,M))\n scores = -np.ones((T,R,A,M))\n\n num_k_det_as_unk = 
np.zeros((T,A,M))\n\n # retrieve E at each category, area range, and max number of detections\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs_unkdt[Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n udtScores = np.concatenate([e['unkdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-udtScores, kind='mergesort')\n udtScoresSorted = udtScores[inds]\n\n udtm_kgt = np.concatenate([e['Matches_unkdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtm_unkgt = np.concatenate([e['Matches_unkdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_kgt = np.concatenate([e['unkdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_unkgt = np.concatenate([e['unkdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n unkgtIg = np.concatenate([e['unkgtIgnore'] for e in E])\n npig = np.count_nonzero(unkgtIg==0 )\n if npig == 0:\n continue\n\n tps = np.logical_and(udtm_unkgt, np.logical_not(udtIg_unkgt) )\n fps = np.logical_and(np.logical_not(udtm_unkgt), np.logical_not(udtIg_unkgt) )\n k_det_as_unk_fps = np.logical_and(udtm_kgt, np.logical_not(udtIg_kgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)\n k_det_as_unk_fp_sum = np.cumsum(k_det_as_unk_fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, k_det_as_unk_fp) in enumerate(zip(tp_sum, fp_sum, k_det_as_unk_fp_sum)):\n if len(k_det_as_unk_fp):\n num_k_det_as_unk[t,a,m] = k_det_as_unk_fp[-1]\n \n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,a,m] = rc[-1]\n else:\n recall[t,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = udtScoresSorted[pi]\n except:\n pass\n precision[t,:,a,m] = np.array(q)\n scores[t,:,a,m] = np.array(ss)\n \n self.eval_unkdt = {\n 'params': p,\n 'counts': [T, R, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'k_det_as_unk': num_k_det_as_unk\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _num_unk_det_as_known(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'UNK_det_as_K'\n typeStr = '(AOSE)'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n unk_det_as_known = self.eval_kdt['unk_det_as_known']\n\n self.unk_det_as_known = unk_det_as_known[tind,:,aind,mind]\n\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, 
np.sum(unk_det_as_known[tind,:,aind,mind])))\n print(unk_det_as_known[tind,:,aind,mind])\n \n return np.sum(unk_det_as_known[tind,:,aind,mind])\n\n def _num_k_det_as_unk(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'K_det_as_UNK'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n k_det_as_unk = self.eval_unkdt['k_det_as_unk']\n\n self.k_det_as_unk = k_det_as_unk[tind,aind,mind]\n\n print(iStr.format(titleStr, iouStr, areaRng, maxDets, k_det_as_unk[tind,aind,mind]))\n \n return k_det_as_unk[tind,aind,mind]\n \n def _wi(iouThr=None, areaRng='all', maxDets=100, recall_level=0.8):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Wilderness Impact'\n typeStr = '(WI)'\n iouStr = '{:0.2f}'.format(iouThr)\n\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n rind = [i for i, recT in enumerate(p.recThrs) if recT == recall_level]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n tp_plus_fp_cs = self.eval_kdt['tp_plus_fp_cs']\n fp_os = self.eval_kdt['fp_os']\n\n wi = np.mean(fp_os[tind,rind,:,aind,mind]) / np.mean(tp_plus_fp_cs[tind,rind,:,aind,mind])\n \n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, wi))\n\n return wi\n \n def _print_precision(iouThr=.5, areaRng='all', maxDets=100 ):\n p = self.params\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = np.squeeze(s[:,:,:,aind,mind])\n s = s[[10, 20, 30, 40, 50, 60, 70, 80, 90, 100],:]\n \n for i in range(s.shape[1]):\n print(s[:,i])\n\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Known Average Precision' if ap == 1 else 'Known Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_kdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarize_unk( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Unknown Average Precision' if ap == 1 else 'Unknown Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is 
None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_unkdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_unkdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((30,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, maxDets=self.params.maxDets[3])\n stats[10] = _summarize(0, maxDets=self.params.maxDets[4])\n stats[11] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[12] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[13] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[14] = _wi(iouThr=.5, areaRng='all', maxDets=100, recall_level=0.8)\n stats[15] = _num_unk_det_as_known(iouThr=.5, areaRng='all', maxDets=100)\n \n stats[16] = _summarize_unk(1)\n stats[17] = _summarize_unk(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[18] = _summarize_unk(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[19] = _summarize_unk(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[20] = _summarize_unk(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[21] = _summarize_unk(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[22] = _summarize_unk(0, maxDets=self.params.maxDets[0])\n stats[23] = _summarize_unk(0, maxDets=self.params.maxDets[1])\n stats[24] = _summarize_unk(0, maxDets=self.params.maxDets[2])\n stats[25] = _summarize_unk(0, maxDets=self.params.maxDets[3])\n stats[26] = _summarize_unk(0, maxDets=self.params.maxDets[4])\n stats[27] = _summarize_unk(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[28] = _summarize_unk(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[29] = _summarize_unk(0, areaRng='large', maxDets=self.params.maxDets[-1])\n return stats\n \n if not self.eval_kdt or not self.eval_unkdt:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n self.stats = summarize()" } ]
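The `_wi` and `_num_unk_det_as_known` summaries in the snippet above report Wilderness Impact as the ratio of known-class detections that matched unknown ground truth (open-set false positives) to all known-class detections (TP + FP) at a fixed recall level, and the absolute open-set error (AOSE) as the raw count of such detections. A toy computation with made-up numbers is sketched below; the array names are illustrative only.

import numpy as np

def wilderness_impact(fp_open_set: np.ndarray, tp_plus_fp_closed_set: np.ndarray) -> float:
    # Mean over categories of open-set FPs divided by the mean of all known-class
    # detections, mirroring the accumulate()/_wi arrays in the snippet above.
    return float(np.mean(fp_open_set) / np.mean(tp_plus_fp_closed_set))

# Toy numbers at recall level 0.8: two known categories, with 14 + 6 detections
# landing on unknown ground truth out of 200 + 100 known-class detections.
fp_os = np.array([14.0, 6.0])
tp_plus_fp_cs = np.array([200.0, 100.0])
print(wilderness_impact(fp_os, tp_plus_fp_cs))  # 10 / 150 ~= 0.0667
aose = fp_os.sum()  # absolute open-set error: 20 unknown objects detected as known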
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from detectron2.evaluation.evaluator import DatasetEvaluator from detectron2.evaluation.coco_evaluation import instances_to_coco_json from openset_rcnn.data.graspnet_meta import GRASPNET_KNOWN_IDS, GRASPNET_KNOWN_CATEGORIES from .os_cocoeval import OpensetCOCOEval
17,239
[32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, eval_type, known_names, known_ids, img_ids=None, max_dets_per_image=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) assert eval_type == "openset" for _, ann in enumerate(coco_gt.dataset['annotations']): if ann['category_id'] not in known_ids: ann['category_id'] = 1000
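The proposal-recall code in the cropped_code above greedily pairs each ground-truth box with its best remaining proposal, collects the matched IoUs in `gt_overlaps`, and then reports recall over a ladder of IoU thresholds plus their mean (AR). A stripped-down sketch of that final step, assuming `gt_overlaps` and `num_pos` have already been gathered as in the code above:

import torch

def recall_curve(gt_overlaps: torch.Tensor, num_pos: int, step: float = 0.05):
    """Recall at IoU thresholds 0.5, 0.55, ..., 0.95 and their mean (AR)."""
    thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.stack([(gt_overlaps >= t).float().sum() / float(num_pos)
                           for t in thresholds])
    return thresholds, recalls, recalls.mean()

# Toy example: 4 ground-truth boxes whose best proposals reach IoUs of
# 0.92, 0.71, 0.58 and 0.31 -> recall@0.5 is 3/4 = 0.75.
thr, rec, ar = recall_curve(torch.tensor([0.92, 0.71, 0.58, 0.31]), num_pos=4)
print(rec[0].item(), ar.item())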
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self.known_names = GRASPNET_KNOWN_CATEGORIES self.known_ids = GRASPNET_KNOWN_IDS self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." 
) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset if self._do_evaluation: self._kpt_oks_sigmas = kpt_oks_sigmas self.eval_type = eval_type def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) if len(prediction) > 1: self._predictions.append(prediction) def evaluate(self, img_ids=None, resume=False): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset resume: if resume is True, read the saved detection results to save time. """ if not resume: if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) else: file_path = os.path.join(self._output_dir, "instances_predictions.pth") predictions = torch.load(file_path) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids, resume=resume) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): """ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. 
""" tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def save_json(self, output_dir): PathManager.mkdirs(output_dir) coco_results = list(itertools.chain(*[x["instances"] for x in self._predictions])) if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: result["category_id"] = reverse_id_mapping[result["category_id"]] if result["category_id"] not in self.known_ids: result["category_id"] = 1000 file_path = os.path.join(output_dir, "coco_instances_results.json") with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() def _eval_predictions(self, predictions, img_ids=None, resume=False): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} if self.eval_type != "Closeset": reverse_id_mapping[1000] = 1000 for result in coco_results: category_id = result["category_id"] assert category_id < num_classes or category_id == 1000, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}] or 1000." ) result["category_id"] = reverse_id_mapping[category_id] else: for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir and not resume: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, self.eval_type, self.known_names, self.known_ids, img_ids=img_ids, max_dets_per_image=self._max_dets_per_image, ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, task, self.eval_type, class_names=self._metadata.get("thing_classes") ) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. 
""" if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, eval_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ if eval_type == "openset": metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR10", "AR20", "AR30", "AR50", "AR100", "ARs", "ARm", "ARl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR10", "AR20", "AR30", "AR50", "AR100", "ARs", "ARm", "ARl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } results['WI'] = coco_eval.stats[14] results['AOSE'] = coco_eval.stats[15] self._logger.info("Evaluation type is {} known: \n".format(self.eval_type)) self._logger.info( "Evaluation results for known {}: \n".format(iou_type) + create_small_table(results) ) results_unk = { metric: float(coco_eval.stats[idx+16] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info("Evaluation type is {} unknown: \n".format(self.eval_type)) self._logger.info( "Evaluation results for unknown {}: \n".format(iou_type) + create_small_table(results_unk) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") class_names = self.known_names # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval_kdt["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 
100))) unk_precisions = coco_eval.eval_unkdt["precision"] unk_precisions = unk_precisions[:, :, 0, -1] unk_precisions = unk_precisions[unk_precisions > -1] unk_ap = np.mean(unk_precisions) if unk_precisions.size else float("nan") results_per_category.append(("unknown", float(unk_ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format(iou_type) + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) np.save(os.path.join(self._output_dir, "known_precision_" + iou_type + ".npy"), coco_eval.eval_kdt["precision"]) np.save(os.path.join(self._output_dir, "known_recall_" + iou_type + ".npy"), coco_eval.eval_kdt["recall"]) np.save(os.path.join(self._output_dir, "unknown_precision_" + iou_type + ".npy"), coco_eval.eval_unkdt["precision"]) np.save(os.path.join(self._output_dir, "unknown_recall_" + iou_type + ".npy"), coco_eval.eval_unkdt["recall"]) return results elif eval_type == "cls_agn_unk": metrics = { "bbox": ["AR10", "AR20", "AR30", "AR50", "AR100", "AP"], "segm": ["AR10", "AR20", "AR30", "AR50", "AR100", "AP"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info("Evaluation type is {}: \n".format(self.eval_type)) self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. 
""" # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, eval_type, known_names, known_ids, img_ids=None, max_dets_per_image=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. 
# We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) assert eval_type == "openset" for _, ann in enumerate(coco_gt.dataset['annotations']): if ann['category_id'] not in known_ids: ann['category_id'] = 1000
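# --- Illustration (not part of the original file) ---
# The loop that ends above folds every ground-truth annotation whose
# category_id is outside known_ids into the single catch-all id 1000 before
# the open-set COCOeval is built; save_json applies the same remapping to the
# predictions. A minimal sketch on made-up annotations (all ids below are
# hypothetical):
known_ids = {1, 2, 3}          # stands in for GRASPNET_KNOWN_IDS
UNKNOWN_ID = 1000

annotations = [
    {"id": 1, "image_id": 0, "category_id": 2, "bbox": [0, 0, 10, 10]},   # known class
    {"id": 2, "image_id": 0, "category_id": 7, "bbox": [5, 5, 20, 20]},   # not a known class
]
for ann in annotations:
    if ann["category_id"] not in known_ids:
        ann["category_id"] = UNKNOWN_ID

assert [a["category_id"] for a in annotations] == [2, 1000]
# --- end illustration ---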
coco_eval = OpensetCOCOEval(coco_gt, coco_dt, iou_type)
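# --- Illustration (not part of the original file) ---
# OpensetCOCOEval is constructed with the same arguments as pycocotools'
# COCOeval and the evaluator later reads its .stats array, so the surrounding
# flow presumably mirrors the standard API sketched below (file names are
# placeholders; the extra open-set entries -- WI at stats[14], AOSE at
# stats[15], the unknown-class block from stats[16] on -- are specific to the
# custom class, not part of pycocotools).
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("dataset_coco_format.json")                 # ground truth in COCO json format
coco_dt = coco_gt.loadRes("coco_instances_results.json")   # detections dumped by the evaluator

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
# coco_eval.params.imgIds = [...]   # optionally restrict to a subset of images
coco_eval.evaluate()     # per-image, per-category matching
coco_eval.accumulate()   # build precision/recall tables
coco_eval.summarize()    # fills coco_eval.stats: AP, AP50, AP75, APs/m/l, AR1/10/100, ARs/m/l
print(coco_eval.stats)
# --- end illustration ---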
2
2023-11-21 01:47:01+00:00
24k
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/unet_2d_condition_flax.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "flax_register_to_config", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "def flax_register_to_config(cls):\n original_init = cls.__init__\n\n @functools.wraps(original_init)\n def init(self, *args, **kwargs):\n if not isinstance(self, ConfigMixin):\n raise RuntimeError(\n f\"`@register_for_config` was applied to 
{self.__class__.__name__} init method, but this class does \"\n \"not inherit from `ConfigMixin`.\"\n )\n\n # Ignore private kwargs in the init. Retrieve all passed attributes\n init_kwargs = dict(kwargs.items())\n\n # Retrieve default values\n fields = dataclasses.fields(self)\n default_kwargs = {}\n for field in fields:\n # ignore flax specific attributes\n if field.name in self._flax_internal_args:\n continue\n if type(field.default) == dataclasses._MISSING_TYPE:\n default_kwargs[field.name] = None\n else:\n default_kwargs[field.name] = getattr(self, field.name)\n\n # Make sure init_kwargs override default kwargs\n new_kwargs = {**default_kwargs, **init_kwargs}\n # dtype should be part of `init_kwargs`, but not `new_kwargs`\n if \"dtype\" in new_kwargs:\n new_kwargs.pop(\"dtype\")\n\n # Get positional arguments aligned with kwargs\n for i, arg in enumerate(args):\n name = fields[i].name\n new_kwargs[name] = arg\n\n # Take note of the parameters that were not present in the loaded config\n if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:\n new_kwargs[\"_use_default_values\"] = list(set(new_kwargs.keys()) - set(init_kwargs))\n\n getattr(self, \"register_to_config\")(**new_kwargs)\n original_init(self, *args, **kwargs)\n\n cls.__init__ = init\n return cls" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __init_subclass__(cls) -> None:\n \"\"\"Register subclasses as pytree nodes.\n\n This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with\n `static_graph=True` with modules that output `ModelOutput` subclasses.\n \"\"\"\n if is_torch_available():\n import torch.utils._pytree\n\n torch.utils._pytree._register_pytree_node(\n cls,\n torch.utils._pytree._dict_flatten,\n lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),\n )\n\n def __post_init__(self) -> None:\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k: Any) -> Any:\n if isinstance(k, str):\n inner_dict = 
dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any, ...]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "FlaxTimestepEmbedding", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb" }, { "identifier": "FlaxTimesteps", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n\n dim: int = 32\n flip_sin_to_cos: bool = False\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(\n timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift\n )" }, { "identifier": "FlaxModelMixin", "path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin(PushToHubMixin):\n r\"\"\"\n Base class for all Flax models.\n\n [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and\n saving models.\n\n - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].\n \"\"\"\n\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _flax_internal_args = [\"name\", \"parent\", \"dtype\"]\n\n @classmethod\n def _from_config(cls, config, **kwargs):\n \"\"\"\n All context managers that the model should be initialized under go here.\n \"\"\"\n return cls(config, **kwargs)\n\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n \"\"\"\n Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.\n \"\"\"\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, 
jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)\n\n def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast\n the `params` in place.\n\n This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full\n half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision\n >>> params = model.to_bf16(params)\n >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_bf16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.bfloat16, mask)\n\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the\n model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to illustrate the use of this method,\n >>> # we'll first cast to fp16 and back to fp32\n >>> params = model.to_f16(params)\n >>> # now cast back to fp32\n >>> params = model.to_fp32(params)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float32, mask)\n\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float16`. 
This returns a new `params` tree and does not cast the\n `params` in place.\n\n This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full\n half-precision training or to save weights in float16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`\n for params you want to cast, and `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to cast these to float16\n >>> params = model.to_fp16(params)\n >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_fp16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float16, mask)\n\n def init_weights(self, rng: jax.Array) -> Dict:\n raise NotImplementedError(f\"init_weights method has to be implemented for {self}\")\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a pretrained Flax model from a pretrained model configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model\n hosted on the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n using [`~FlaxModelMixin.save_pretrained`].\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If\n specified, all the computation will be performed with the given `dtype`.\n\n <Tip>\n\n This only specifies the dtype of the *computation* and does not influence the dtype of model\n parameters.\n\n If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and\n [`~FlaxModelMixin.to_bf16`].\n\n </Tip>\n\n model_args (sequence of positional arguments, *optional*):\n All remaining positional arguments are passed to the underlying model's `__init__` method.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n from_pt (`bool`, *optional*, defaults to `False`):\n Load the model weights from a PyTorch checkpoint save file.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the model (for\n example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying\n model's `__init__` method (we assume all relevant updates to the configuration have already been\n done).\n - If a configuration is not provided, `kwargs` are first passed to the configuration class\n initialization function [`~ConfigMixin.from_config`]. 
Each key of the `kwargs` that corresponds\n to a configuration attribute is used to override said attribute with the supplied `kwargs` value.\n Remaining keys that do not correspond to any configuration attribute are passed to the underlying\n model's `__init__` function.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"./test/saved_model/\")\n ```\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```bash\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n \"\"\"\n config = kwargs.pop(\"config\", None)\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n from_pt = kwargs.pop(\"from_pt\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"flax\",\n }\n\n # Load config if we don't provide one\n if config is None:\n config, unused_kwargs = cls.load_config(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n **kwargs,\n )\n\n model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs)\n\n # Load model\n pretrained_path_with_subfolder = (\n pretrained_model_name_or_path\n if subfolder is None\n else os.path.join(pretrained_model_name_or_path, subfolder)\n )\n if os.path.isdir(pretrained_path_with_subfolder):\n if from_pt:\n if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} \"\n )\n model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):\n # Load from a Flax checkpoint\n model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)\n # Check if pytorch weights exist instead\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. 
Please load the model\"\n \" using `from_pt=True`.\"\n )\n else:\n raise EnvironmentError(\n f\"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory \"\n f\"{pretrained_path_with_subfolder}.\"\n )\n else:\n try:\n model_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier \"\n \"listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a \"\n \"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli \"\n \"login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for \"\n \"this model name. Check the model page at \"\n f\"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n f\"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\\n\"\n f\"{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\\nCheckout your\"\n \" internet connection or see how to run the library in offline mode at\"\n \" 'https://huggingface.co/docs/transformers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. \"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\"\n )\n\n if from_pt:\n if is_torch_available():\n from .modeling_utils import load_state_dict\n else:\n raise EnvironmentError(\n \"Can't load the model in PyTorch format because PyTorch is not installed. \"\n \"Please, install PyTorch or use native Flax weights.\"\n )\n\n # Step 1: Get the pytorch file\n pytorch_model_file = load_state_dict(model_file)\n\n # Step 2: Convert the weights\n state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)\n else:\n try:\n with open(model_file, \"rb\") as state_f:\n state = from_bytes(cls, state_f.read())\n except (UnpicklingError, msgpack.exceptions.ExtraData) as e:\n try:\n with open(model_file) as f:\n if f.read().startswith(\"version\"):\n raise OSError(\n \"You seem to have cloned a repository without having git-lfs installed. 
Please\"\n \" install git-lfs and run `git lfs install` followed by `git lfs pull` in the\"\n \" folder you cloned.\"\n )\n else:\n raise ValueError from e\n except (UnicodeDecodeError, ValueError):\n raise EnvironmentError(f\"Unable to convert {model_file} to Flax deserializable object. \")\n # make sure all arrays are stored as jnp.ndarray\n # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:\n # https://github.com/google/flax/issues/1261\n state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend=\"cpu\")[0]), state)\n\n # flatten dicts\n state = flatten_dict(state)\n\n params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))\n required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())\n\n shape_state = flatten_dict(unfreeze(params_shape_tree))\n\n missing_keys = required_params - set(state.keys())\n unexpected_keys = set(state.keys()) - required_params\n\n if missing_keys:\n logger.warning(\n f\"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. \"\n \"Make sure to call model.init_weights to initialize the missing weights.\"\n )\n cls._missing_keys = missing_keys\n\n for key in state.keys():\n if key in shape_state and state[key].shape != shape_state[key].shape:\n raise ValueError(\n f\"Trying to load the pretrained weight for {key} failed: checkpoint has shape \"\n f\"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. \"\n )\n\n # remove unexpected keys to not be saved again\n for unexpected_key in unexpected_keys:\n del state[unexpected_key]\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or\"\n \" with another architecture.\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n else:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the checkpoint\"\n f\" was trained on, you can already use {model.__class__.__name__} for predictions without further\"\n \" training.\"\n )\n\n return model, unflatten_dict(state)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory so that it can be reloaded using the\n [`~FlaxModelMixin.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a model and its configuration file to. 
Will be created if it doesn't exist.\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful during distributed training and you\n need to call this function on all processes. In this case, set `is_main_process=True` only on the main\n process to avoid race conditions.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # save model\n output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)\n with open(output_model_file, \"wb\") as f:\n model_bytes = to_bytes(params)\n f.write(model_bytes)\n\n logger.info(f\"Model weights saved in {output_model_file}\")\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )" }, { "identifier": "FlaxCrossAttnDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxCrossAttnDownBlock2D(nn.Module):\n r\"\"\"\n Cross Attention 2D Downsizing block - original architecture from Unet transformers:\n https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n num_attention_heads (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):\n enable memory efficient attention https://arxiv.org/abs/2112.05682\n split_head_dim (`bool`, *optional*, defaults to `False`):\n Whether to split the head dimension into a new axis for the self-attention computation. 
In most cases,\n enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n num_attention_heads: int = 1\n add_downsample: bool = True\n use_linear_projection: bool = False\n only_cross_attention: bool = False\n use_memory_efficient_attention: bool = False\n split_head_dim: bool = False\n dtype: jnp.dtype = jnp.float32\n transformer_layers_per_block: int = 1\n\n def setup(self):\n resnets = []\n attentions = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n attn_block = FlaxTransformer2DModel(\n in_channels=self.out_channels,\n n_heads=self.num_attention_heads,\n d_head=self.out_channels // self.num_attention_heads,\n depth=self.transformer_layers_per_block,\n use_linear_projection=self.use_linear_projection,\n only_cross_attention=self.only_cross_attention,\n use_memory_efficient_attention=self.use_memory_efficient_attention,\n split_head_dim=self.split_head_dim,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n if self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n output_states = ()\n\n for resnet, attn in zip(self.resnets, self.attentions):\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxCrossAttnUpBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxCrossAttnUpBlock2D(nn.Module):\n r\"\"\"\n Cross Attention 2D Upsampling block - original architecture from Unet transformers:\n https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n num_attention_heads (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n add_upsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add upsampling layer before each final output\n use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):\n enable memory efficient attention https://arxiv.org/abs/2112.05682\n split_head_dim (`bool`, *optional*, defaults to `False`):\n Whether to split the head dimension into a new axis for the self-attention computation. 
In most cases,\n enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n prev_output_channel: int\n dropout: float = 0.0\n num_layers: int = 1\n num_attention_heads: int = 1\n add_upsample: bool = True\n use_linear_projection: bool = False\n only_cross_attention: bool = False\n use_memory_efficient_attention: bool = False\n split_head_dim: bool = False\n dtype: jnp.dtype = jnp.float32\n transformer_layers_per_block: int = 1\n\n def setup(self):\n resnets = []\n attentions = []\n\n for i in range(self.num_layers):\n res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels\n resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n attn_block = FlaxTransformer2DModel(\n in_channels=self.out_channels,\n n_heads=self.num_attention_heads,\n d_head=self.out_channels // self.num_attention_heads,\n depth=self.transformer_layers_per_block,\n use_linear_projection=self.use_linear_projection,\n only_cross_attention=self.only_cross_attention,\n use_memory_efficient_attention=self.use_memory_efficient_attention,\n split_head_dim=self.split_head_dim,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n if self.add_upsample:\n self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)\n\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n\n if self.add_upsample:\n hidden_states = self.upsamplers_0(hidden_states)\n\n return hidden_states" }, { "identifier": "FlaxDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxDownBlock2D(nn.Module):\n r\"\"\"\n Flax 2D downsizing block\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n add_downsample: bool = True\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n self.resnets = resnets\n\n if 
self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, deterministic=True):\n output_states = ()\n\n for resnet in self.resnets:\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxUNetMidBlock2DCrossAttn", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxUNetMidBlock2DCrossAttn(nn.Module):\n r\"\"\"\n Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n num_attention_heads (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):\n enable memory efficient attention https://arxiv.org/abs/2112.05682\n split_head_dim (`bool`, *optional*, defaults to `False`):\n Whether to split the head dimension into a new axis for the self-attention computation. In most cases,\n enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n num_attention_heads: int = 1\n use_linear_projection: bool = False\n use_memory_efficient_attention: bool = False\n split_head_dim: bool = False\n dtype: jnp.dtype = jnp.float32\n transformer_layers_per_block: int = 1\n\n def setup(self):\n # there is always at least one resnet\n resnets = [\n FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n ]\n\n attentions = []\n\n for _ in range(self.num_layers):\n attn_block = FlaxTransformer2DModel(\n in_channels=self.in_channels,\n n_heads=self.num_attention_heads,\n d_head=self.in_channels // self.num_attention_heads,\n depth=self.transformer_layers_per_block,\n use_linear_projection=self.use_linear_projection,\n use_memory_efficient_attention=self.use_memory_efficient_attention,\n split_head_dim=self.split_head_dim,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n res_block = FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n\n return hidden_states" }, { "identifier": "FlaxUpBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxUpBlock2D(nn.Module):\n r\"\"\"\n Flax 2D upsampling block\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n 
prev_output_channel (:obj:`int`):\n Output channels from the previous block\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n\n in_channels: int\n out_channels: int\n prev_output_channel: int\n dropout: float = 0.0\n num_layers: int = 1\n add_upsample: bool = True\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n\n for i in range(self.num_layers):\n res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels\n resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n self.resnets = resnets\n\n if self.add_upsample:\n self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)\n\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n\n if self.add_upsample:\n hidden_states = self.upsamplers_0(hidden_states)\n\n return hidden_states" } ]
from typing import Dict, Optional, Tuple, Union
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
19,618
sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682). split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 use_memory_efficient_attention: bool = False split_head_dim: bool = False transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None addition_embed_type_num_heads: int = 64 projection_class_embeddings_input_dim: Optional[int] = None def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} added_cond_kwargs = None if self.addition_embed_type == "text_time": # we retrieve the expected `text_embeds_dim` by first checking if the architecture is a refiner # or non-refiner architecture and then by "reverse-computing" from `projection_class_embeddings_input_dim` is_refiner = ( 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim == self.config.projection_class_embeddings_input_dim ) num_micro_conditions = 5 if is_refiner else 6 text_embeds_dim = self.config.projection_class_embeddings_input_dim - ( num_micro_conditions * self.config.addition_time_embed_dim ) time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim time_ids_dims = time_ids_channels // self.addition_time_embed_dim added_cond_kwargs = { "text_embeds": jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), "time_ids": jnp.zeros((1, time_ids_dims), dtype=jnp.float32), } return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): """ The output of [`FlaxUNet2DConditionModel`]. Args: sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: jnp.ndarray @flax_register_to_config class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its general usage and behavior. Inherent JAX features such as the following are supported: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. 
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682). split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 use_memory_efficient_attention: bool = False split_head_dim: bool = False transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None addition_embed_type_num_heads: int = 64 projection_class_embeddings_input_dim: Optional[int] = None def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} added_cond_kwargs = None if self.addition_embed_type == "text_time": # we retrieve the expected `text_embeds_dim` by first checking if the architecture is a refiner # or non-refiner architecture and then by "reverse-computing" from `projection_class_embeddings_input_dim` is_refiner = ( 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim == self.config.projection_class_embeddings_input_dim ) num_micro_conditions = 5 if is_refiner else 6 text_embeds_dim = self.config.projection_class_embeddings_input_dim - ( num_micro_conditions * self.config.addition_time_embed_dim ) time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim time_ids_dims = time_ids_channels // self.addition_time_embed_dim added_cond_kwargs = { "text_embeds": jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), "time_ids": jnp.zeros((1, time_ids_dims), dtype=jnp.float32), } return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time
self.time_proj = FlaxTimesteps(
4
2023-12-28 08:17:40+00:00
24k
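The record above centers on `FlaxUNet2DConditionModel`: the cropped code defines its config fields, `init_weights`, and the start of `setup`, and the line to predict is the `FlaxTimesteps` projection. As a reading aid, here is a minimal, hedged sketch of how such a module is typically instantiated and exercised end to end. It is not part of the dataset row: it assumes `diffusers` with Flax support and `jax` are installed, and the reduced two-block configuration and the 77-token dummy prompt length are illustrative assumptions chosen only to keep the smoke test small.

```python
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

# Hypothetical reduced config for a quick CPU smoke test; the record itself uses the
# default SD-style config (block_out_channels=(320, 640, 1280, 1280), four blocks).
unet = FlaxUNet2DConditionModel(
    sample_size=32,
    in_channels=4,
    out_channels=4,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    attention_head_dim=8,
    cross_attention_dim=32,
)

# init_weights(rng) builds dummy inputs shaped (1, in_channels, sample_size, sample_size),
# as in the cropped code above, and returns the parameter PyTree.
params = unet.init_weights(rng=jax.random.PRNGKey(0))

# Dummy noisy latents, a single timestep, and placeholder text-encoder states.
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 32), dtype=jnp.float32)

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # expected: (1, 4, 32, 32)
```

The forward pass returns a `FlaxUNet2DConditionOutput` whose `sample` has the same spatial shape as the input latents, so the printed shape should match the dummy `sample` above.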
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n load a config file from untrusted sources before manually inspecting\n the content of the file.\n 2. Support config versioning.\n When attempting to merge an old config, it will convert the old config automatically.\n\n .. automethod:: clone\n .. automethod:: freeze\n .. automethod:: defrost\n .. automethod:: is_frozen\n .. automethod:: load_yaml_with_base\n .. automethod:: merge_from_list\n .. automethod:: merge_from_other_cfg\n \"\"\"\n\n @classmethod\n def _open_cfg(cls, filename):\n return PathManager.open(filename, \"r\")\n\n # Note that the default value of allow_unsafe is changed to True\n def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:\n \"\"\"\n Load content from the given config file and merge it into self.\n\n Args:\n cfg_filename: config filename\n allow_unsafe: allow unsafe yaml syntax\n \"\"\"\n assert PathManager.isfile(cfg_filename), f\"Config file '{cfg_filename}' does not exist!\"\n loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)\n loaded_cfg = type(self)(loaded_cfg)\n\n # defaults.py needs to import CfgNode\n from .defaults import _C\n\n latest_ver = _C.VERSION\n assert (\n latest_ver == self.VERSION\n ), \"CfgNode.merge_from_file is only allowed on a config object of latest version!\"\n\n logger = logging.getLogger(__name__)\n\n loaded_ver = loaded_cfg.get(\"VERSION\", None)\n if loaded_ver is None:\n from .compat import guess_version\n\n loaded_ver = guess_version(loaded_cfg, cfg_filename)\n assert loaded_ver <= self.VERSION, \"Cannot merge a v{} config into a v{} config.\".format(\n loaded_ver, self.VERSION\n )\n\n if loaded_ver == self.VERSION:\n self.merge_from_other_cfg(loaded_cfg)\n else:\n # compat.py needs to import CfgNode\n from .compat import upgrade_config, downgrade_config\n\n logger.warning(\n \"Loading an old v{} config file '{}' by automatically upgrading to v{}. 
\"\n \"See docs/CHANGELOG.md for instructions to update your files.\".format(\n loaded_ver, cfg_filename, self.VERSION\n )\n )\n # To convert, first obtain a full config at an old version\n old_self = downgrade_config(self, to_version=loaded_ver)\n old_self.merge_from_other_cfg(loaded_cfg)\n new_config = upgrade_config(old_self)\n self.clear()\n self.update(new_config)\n\n def dump(self, *args, **kwargs):\n \"\"\"\n Returns:\n str: a yaml string representation of the config\n \"\"\"\n # to make it show up in docs\n return super().dump(*args, **kwargs)" }, { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "convert_to_coco_json", "path": "detectron2/data/datasets/coco.py", "snippet": "def convert_to_coco_json(dataset_name, output_file, allow_cached=True):\n \"\"\"\n Converts dataset into COCO format and saves it to a json file.\n dataset_name must be registered in DatasetCatalog and in detectron2's standard format.\n\n Args:\n dataset_name:\n reference from the config file to the catalogs\n must be registered in DatasetCatalog and in detectron2's standard format\n output_file: path of json file that will be saved to\n allow_cached: if json file is already present then skip conversion\n \"\"\"\n\n # TODO: The dataset or the conversion script *may* change,\n # a checksum would be useful for validating the cached data\n\n PathManager.mkdirs(os.path.dirname(output_file))\n with file_lock(output_file):\n if PathManager.exists(output_file) and allow_cached:\n logger.warning(\n f\"Using previously cached COCO format annotations at '{output_file}'. \"\n \"You need to clear the cache file if your dataset has been modified.\"\n )\n else:\n logger.info(f\"Converting annotations of dataset '{dataset_name}' to COCO format ...)\")\n coco_dict = convert_to_coco_dict(dataset_name)\n\n logger.info(f\"Caching COCO format annotations at '{output_file}' ...\")\n tmp_file = output_file + \".tmp\"\n with PathManager.open(tmp_file, \"w\") as f:\n json.dump(coco_dict, f)\n shutil.move(tmp_file, output_file)" }, { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "pairwise_iou", "path": "detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "create_small_table", "path": "detectron2/utils/logger.py", "snippet": "def create_small_table(small_dict):\n \"\"\"\n Create a small table using the keys of small_dict as headers. This is only\n suitable for small dictionaries.\n\n Args:\n small_dict (dict): a result dictionary of only a few items.\n\n Returns:\n str: the table as a string.\n \"\"\"\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table" }, { "identifier": "DatasetEvaluator", "path": "detectron2/evaluation/evaluator.py", "snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass" }, { "identifier": "RefCOCOeval", "path": "detectron2/evaluation/refcocoeval.py", "snippet": "class RefCOCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... 
# load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n # for computing overall iou\n self.total_intersection_area = 0\n self.total_union_area = 0\n self.iou_list = []\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n # evaluateImg = self.evaluateImg\n # maxDet = p.maxDets[-1]\n # self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n # for catId in catIds\n # for areaRng in p.areaRng\n # for imgId in p.imgIds\n # ]\n # self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n\n # for computing overall iou\n # there is only one bbox and segm\n if p.iouType == 'bbox':\n g, d = g[0], d[0]\n g_bbox = [g[0], g[1], g[2] + g[0], g[3] + g[1]] # x1y1wh -> x1y1x2y2\n d_bbox = [d[0], d[1], d[2] + d[0], d[3] + d[1]] # x1y1wh -> x1y1x2y2\n g_bbox = torch.tensor(g_bbox).unsqueeze(0)\n d_bbox = torch.tensor(d_bbox).unsqueeze(0)\n iou, intersection, union = compute_bbox_iou(d_bbox, g_bbox)\n elif p.iouType == 'segm':\n g_segm = decode(g[0])\n d_segm = decode(d[0])\n g_segm = torch.tensor(g_segm).unsqueeze(0)\n d_segm = torch.tensor(d_segm).unsqueeze(0)\n iou, intersection, union = compute_mask_iou(d_segm, g_segm)\n else:\n raise Exception('unknown iouType for iou computation')\n iou, intersection, union = iou.item(), intersection.item(), union.item()\n self.total_intersection_area += intersection\n self.total_union_area += union\n self.iou_list.append(iou)\n return ious\n\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load 
computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] 
for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def 
_summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
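The RefCOCO-style evaluator in the context snippet above calls compute_bbox_iou and compute_mask_iou, which are not shown in this row. A minimal sketch of what such single-pair helpers typically look like is given below; it assumes [1, 4] boxes in (x1, y1, x2, y2) and [1, H, W] binary masks, and returns the IoU together with the raw intersection and union that the snippet accumulates into total_intersection_area / total_union_area. This is an illustration, not the repository's actual implementation.

import torch

def compute_bbox_iou(pred, gt):
    # pred, gt: [1, 4] boxes in (x1, y1, x2, y2); returns iou, intersection, union
    x1 = torch.max(pred[:, 0], gt[:, 0])
    y1 = torch.max(pred[:, 1], gt[:, 1])
    x2 = torch.min(pred[:, 2], gt[:, 2])
    y2 = torch.min(pred[:, 3], gt[:, 3])
    intersection = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area_pred = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_gt = (gt[:, 2] - gt[:, 0]) * (gt[:, 3] - gt[:, 1])
    union = area_pred + area_gt - intersection
    return intersection / union, intersection, union

def compute_mask_iou(pred, gt):
    # pred, gt: [1, H, W] binary masks; intersection/union are pixel counts
    intersection = (pred.bool() & gt.bool()).sum(dim=(1, 2)).float()
    union = (pred.bool() | gt.bool()).sum(dim=(1, 2)).float()
    return intersection / union, intersection, union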
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.evaluation.refcocoeval import RefCOCOeval
14,835
self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) # results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. 
""" # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
# Copyright (c) Facebook, Inc. and its affiliates. try: except ImportError: COCOeval_opt = COCOeval class COCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), allow_cached_coco=True, force_tasks=None, refcoco=False ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (int): limit on the maximum number of detections per image. By default in COCO, this limit is to 100, but this can be customized to be greater, as is needed in evaluation metrics AP fixed and AP pool (see https://arxiv.org/pdf/2102.01066.pdf) This doesn't affect keypoint evaluation. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval When empty, it will use the defaults in COCO. Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. allow_cached_coco (bool): Whether to use cached coco json from previous validation runs. You should set this to False if you need to use different validation data. Defaults to True. """ self.dataset_name = dataset_name self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self.force_tasks = force_tasks self.refcoco = refcoco if use_fast_impl and (COCOeval_opt is COCOeval): self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") use_fast_impl = False self._use_fast_impl = use_fast_impl # COCOeval requires the limit on the number of detections per image (maxDets) to be a list # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the # 3rd element (100) is used as the limit on the number of detections per image when # evaluating AP. 
COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. if max_dets_per_image is None: max_dets_per_image = [1, 10, 100] else: max_dets_per_image = [1, 10, max_dets_per_image] self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." ) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset if self._do_evaluation: self._kpt_oks_sigmas = kpt_oks_sigmas def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) if len(prediction) > 1: self._predictions.append(prediction) def evaluate(self, img_ids=None): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): """ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. 
""" tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def _eval_predictions(self, predictions, img_ids=None): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) if self.force_tasks is not None: tasks = self.force_tasks # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: if "refcoco" in self.dataset_name: file_path = os.path.join(self._output_dir, "{}_instances_results.json".format(self.dataset_name)) else: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas, use_fast_impl=self._use_fast_impl, img_ids=img_ids, max_dets_per_image=self._max_dets_per_image, refcoco=self.refcoco ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) if not self.refcoco: res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res else: res = self._derive_refcoco_results(coco_eval, task) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. """ if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. 
bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format(iou_type) + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def _derive_refcoco_results(self, coco_eval, iou_type): """ Derive the desired score numbers from summarized COCOeval. 
Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = {"bbox": ["[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "oIoU", "mIoU"], "segm": ["[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "oIoU", "mIoU"] }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float("nan") for idx, metric in enumerate(metrics) } ious = np.array([v for (k, v) in coco_eval.ious.items()]) total_intersection_area = coco_eval.total_intersection_area total_union_area = coco_eval.total_union_area iou_list = coco_eval.iou_list # compute metrics results["[email protected]"] = np.sum(ious > 0.5) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.6) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.7) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.8) / len(ious) * 100 results["[email protected]"] = np.sum(ious > 0.9) / len(ious) * 100 results["oIoU"] = total_intersection_area / total_union_area * 100 results["mIoU"] = np.mean(ious) * 100 # if iou_type == "bbox": # results["[email protected]"] = np.sum(ious > 0.5) / len(ious) * 100 # elif iou_type == "segm": # results["mIoU"] = np.mean(ious) * 100 # else: # raise ValueError("Unsupported iou_type!") self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) # results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. 
keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
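Given the RefCOCO-aware COCOEvaluator defined in this row's code, a plausible usage sketch with detectron2's standard evaluation loop follows. The import path for COCOEvaluator and the dataset name are hypothetical placeholders; build_detection_test_loader and inference_on_dataset are the stock detectron2 entry points.

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

# hypothetical import path: wherever the COCOEvaluator class shown above lives
from coco_evaluation import COCOEvaluator

def evaluate_refcoco(cfg, model, dataset_name="refcoco_val"):  # dataset name is illustrative
    evaluator = COCOEvaluator(
        dataset_name,
        output_dir="./eval_out",
        force_tasks={"bbox", "segm"},  # evaluate both box and mask IoU
        refcoco=True,                  # report P@X / oIoU / mIoU instead of COCO AP
    )
    loader = build_detection_test_loader(cfg, dataset_name)
    return inference_on_dataset(model, loader, evaluator)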
3
2023-12-22 13:31:33+00:00
24k
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
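The ImplicitVolume snippet in the context list above estimates normals by central differences over six axis-aligned offsets (the "finite_difference_laplacian" branch). Below is a minimal sketch of that scheme, assuming a toy analytic density in place of the real forward_density; the Gaussian `density_fn` and the shapes used are made up for illustration and are not part of the dataset record.

```python
import torch
import torch.nn.functional as F

def density_fn(points: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for forward_density: a Gaussian blob centered at the origin.
    return torch.exp(-0.5 * (points ** 2).sum(dim=-1, keepdim=True))

def finite_difference_normal(points: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
    # Six axis-aligned offsets (+x, -x, +y, -y, +z, -z), as in the snippet.
    offsets = torch.tensor(
        [[eps, 0.0, 0.0], [-eps, 0.0, 0.0],
         [0.0, eps, 0.0], [0.0, -eps, 0.0],
         [0.0, 0.0, eps], [0.0, 0.0, -eps]],
        dtype=points.dtype, device=points.device,
    )
    # [..., 6, 3] probe points -> [..., 6, 1] densities.
    d = density_fn(points[..., None, :] + offsets)
    # Central difference of the density; the minus sign makes the normal point
    # away from increasing density (outward), matching the snippet.
    normal = -0.5 * (d[..., 0::2, 0] - d[..., 1::2, 0]) / eps
    return F.normalize(normal, dim=-1)

pts = torch.randn(4, 3)
print(finite_difference_normal(pts))  # [4, 3] unit vectors, roughly radial here
```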
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
from tqdm import tqdm
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
15,779
scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
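The cropped_code above re-orients a loaded mesh by stacking the chosen up/front axis vectors into a rotation matrix and inverting it. The standalone sketch below isolates just that alignment step; the up/front strings and random vertices are hypothetical inputs chosen for illustration.

```python
import numpy as np

DIR2VEC = {
    "+x": np.array([1.0, 0.0, 0.0]), "-x": np.array([-1.0, 0.0, 0.0]),
    "+y": np.array([0.0, 1.0, 0.0]), "-y": np.array([0.0, -1.0, 0.0]),
    "+z": np.array([0.0, 0.0, 1.0]), "-z": np.array([0.0, 0.0, -1.0]),
}

def align_to_std(vertices: np.ndarray, up: str = "+y", front: str = "+z") -> np.ndarray:
    """Rotate vertices so `up` maps to +z and `front` maps to +x, as in the snippet."""
    z_, x_ = DIR2VEC[up], DIR2VEC[front]      # must be orthogonal directions
    y_ = np.cross(z_, x_)                     # completes a right-handed frame
    std2mesh = np.stack([x_, y_, z_], axis=0).T
    mesh2std = np.linalg.inv(std2mesh)        # orthonormal, so this equals its transpose
    return (mesh2std @ vertices.T).T

verts = np.random.rand(8, 3)
aligned = align_to_std(verts, up="+y", front="+z")
print(aligned.shape)  # (8, 3)
```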
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return if self.cfg.sdf_bias != 0.0: threestudio.warn( "shape_init and sdf_bias are both specified, which may lead to unexpected results." 
) get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." 
) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
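The all_code field's isosurface() hands the per-vertex SDF to the MarchingTetrahedraHelper shown earlier, whose first step is to turn each tetrahedron's four SDF signs into a 4-bit case index used to look up triangle counts. Here is a toy sketch of that sign-to-case step, with made-up SDF values and two hypothetical tetrahedra; only the lookup table is copied from the snippet.

```python
import torch

# Per-case triangle counts, from the snippet's num_triangles_table.
NUM_TRIANGLES_TABLE = torch.tensor(
    [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long
)

def tet_cases(sdf_n: torch.Tensor, tet_fx4: torch.Tensor):
    """Return (valid-tet mask, 4-bit case index, triangle count) per tetrahedron."""
    occ_n = sdf_n > 0                                    # per-vertex occupancy
    occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)  # [F, 4] signs per tet
    occ_sum = occ_fx4.sum(-1)
    valid = (occ_sum > 0) & (occ_sum < 4)                # surface crosses these tets
    v_id = torch.pow(2, torch.arange(4, dtype=torch.long))
    tetindex = (occ_fx4[valid].long() * v_id).sum(-1)    # case index in [0, 15]
    return valid, tetindex, NUM_TRIANGLES_TABLE[tetindex]

sdf = torch.tensor([0.3, -0.2, 0.5, -0.4, 0.1])          # toy SDF at 5 grid vertices
tets = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])        # two toy tetrahedra
print(tet_cases(sdf, tets))
```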
other: BaseGeometry,
1
2023-12-23 12:37:48+00:00
24k
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
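Illustrative note, not part of the dataset row: the context snippets above come from OpenFold's rigid_utils, whose Rigid class pairs a Rotation with a translation. The sketch below re-derives the same compose/apply/invert semantics with plain 4x4 homogeneous matrices and torch only; it is an assumed equivalence for readers of this dump, not code taken from the frame2seq or OpenFold repositories.

# Minimal sketch of the Rigid semantics shown in the snippets above:
# compose() chains transforms, apply() maps points, invert() undoes a transform.
import math
import torch

def make_rigid(rot: torch.Tensor, trans: torch.Tensor) -> torch.Tensor:
    """rot: [3, 3], trans: [3] -> homogeneous [4, 4] transform."""
    m = torch.eye(4)
    m[:3, :3] = rot
    m[:3, 3] = trans
    return m

def apply(m: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
    """pts: [N, 3] row vectors -> rotated-and-translated points, mirroring Rigid.apply()."""
    return pts @ m[:3, :3].T + m[:3, 3]

c, s = math.cos(math.pi / 2), math.sin(math.pi / 2)
rot_z = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
m1 = make_rigid(rot_z, torch.tensor([1.0, 0.0, 0.0]))
m2 = make_rigid(torch.eye(3), torch.tensor([0.0, 2.0, 0.0]))
pts = torch.tensor([[1.0, 0.0, 0.0]])

composed = m1 @ m2                                   # analogous to rigid1.compose(rigid2)
print(apply(composed, pts))                          # equals apply(m1, apply(m2, pts))
print(apply(m1, apply(m2, pts)))
print(apply(torch.linalg.inv(m1), apply(m1, pts)))   # invert(): recovers the original pts

Composition corresponds to the matrix product, which is consistent with the snippet's Rigid.compose computing new_trans = self._rots.apply(r._trans) + self._trans.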
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
token_num: 14435
self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) z[0] = z[0].cpu() # [*, H, N_res, N_res]
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res]
next_line: if(is_fp16_enabled()):
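The gold next_line for this row opens a mixed-precision guard. As a hedged illustration of the pattern such a guard typically introduces (computing the scalar q·k attention logits in fp32 while autocast is active), here is a self-contained sketch: torch.is_autocast_enabled() stands in for the repo's is_fp16_enabled() helper, and permute_final_dims is copied from the tensor_utils snippet included in this row's context. This is an assumption about the continuation, not text from the dataset.

# Assumed illustration of the fp16 guard that the gold next_line begins.
import torch

def permute_final_dims(tensor: torch.Tensor, inds):
    # Copied from the tensor_utils snippet in this row's context.
    zero_index = -1 * len(inds)
    first_inds = list(range(len(tensor.shape[:zero_index])))
    return tensor.permute(first_inds + [zero_index + i for i in inds])

def scalar_attention_logits(q: torch.Tensor, k: torch.Tensor) -> torch.Tensor:
    """q, k: [*, N_res, H, C_hidden] -> logits of shape [*, H, N_res, N_res]."""
    if torch.is_autocast_enabled():  # stand-in for the repo's is_fp16_enabled()
        # Leave autocast so the matmul accumulates in full precision.
        with torch.cuda.amp.autocast(enabled=False):
            return torch.matmul(
                permute_final_dims(q.float(), (1, 0, 2)),  # [*, H, N_res, C]
                permute_final_dims(k.float(), (1, 2, 0)),  # [*, H, C, N_res]
            )
    return torch.matmul(
        permute_final_dims(q, (1, 0, 2)),
        permute_final_dims(k, (1, 2, 0)),
    )

q = torch.randn(2, 16, 12, 32)  # [batch, N_res, H, C_hidden]
k = torch.randn(2, 16, 12, 32)
print(scalar_attention_logits(q, k).shape)  # torch.Size([2, 12, 16, 16])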
gold_snippet_index: 6
created_at: 2023-12-25 09:29:36+00:00
level: 24k
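End of this row. Read together, the fields describe a repository-level next-line completion example: a model sees the retrieved context snippets, the import statement and cropped_code, and must produce next_line, with gold_snippet_index marking the context entry that contains the needed API. The dump ships no scorer, so the following exact-match check is only an assumed, minimal way to grade a prediction against a row; the field name follows the schema at the top of this file.

# Hypothetical helper, not shipped with the dataset: exact-match scoring of a
# predicted next line against a row of this dump.
from typing import Any, Dict

def exact_match_next_line(row: Dict[str, Any], prediction: str) -> bool:
    """True if the predicted line equals the row's gold next_line (whitespace-insensitive)."""
    return prediction.strip() == row["next_line"].strip()

row = {"next_line": "if(is_fp16_enabled()):"}  # gold value from the row above
print(exact_match_next_line(row, "  if(is_fp16_enabled()):"))  # True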
repo_name: iKala/ievals
file_path: ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndToken=\"<|im_end|>\",\n assistantMessageToken=\"<|im_start|>assistant\\n\",\n userMessageToken=\"<|im_start|>user\\n\",\n switch_zh_hans=False,\n ):\n super(TGI_Evaluator, self).__init__(choices, model_name, k)\n self.ip_addr = ip_addr\n self.model_name = model_name\n self.userMessageToken = userMessageToken\n self.assistantMessageToken = assistantMessageToken\n self.messageEndToken = messageEndToken\n self.systemMessageToken = systemMessageToken\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. {line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += (\n self.systemMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n elif prompt[\"role\"] == \"user\":\n text += (\n self.userMessageToken + prompt[\"content\"] + self.messageEndToken\n )\n elif prompt[\"role\"] == \"assistant\":\n text += (\n self.assistantMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n text += self.assistantMessageToken\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = requests.post(\n f\"http://{self.ip_addr}/generate\",\n data=json.dumps(\n {\n \"inputs\": text,\n \"parameters\": {\n \"max_new_tokens\": 90,\n \"temperature\": 0.001,\n \"stop\": 
[self.messageEndToken],\n },\n }\n ),\n headers={\"Content-Type\": \"application/json\"},\n )\n r = response.json()\n if \"generated_text\" not in r:\n raise ValueError(\"not found: \" + str(r))\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.json()[\"generated_text\"].split(\n self.messageEndToken\n )[0]\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]).\",\n r\"答案:([A-D])\",\n r\"([A-D]). 
\",\n r\"^選([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"正確答案是([A-D])\",\n r\"正確答案是 ([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Gemini_Evaluator", "path": "ievals/modules/qa_evaluators/gemini.py", "snippet": "class Gemini_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Gemini_Evaluator, self).__init__(choices, model_name, k)\n genai.configure(api_key=api_key)\n\n self.model = genai.GenerativeModel(\n model_name,\n safety_settings=[\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n ],\n )\n\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = []\n prev_role = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text.append(prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"user\":\n if prev_role == \"system\":\n text[-1] += \"問題: \" + prompt[\"content\"] + \"\\n\"\n else:\n text.append(\"問題: \" + prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"assistant\":\n text.append(prompt[\"content\"] + \"\\n\")\n prev_role = prompt[\"role\"]\n if self.converter:\n text = [self.converter.convert(seg) for seg in text]\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.model.generate_content(text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response == None:\n response_str = \"\"\n else:\n try:\n response_str = response.text\n except (ValueError, IndexError):\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and 
(ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Claude_Evaluator", "path": "ievals/modules/qa_evaluators/claude.py", "snippet": "class Claude_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Claude_Evaluator, self).__init__(choices, model_name, k)\n self.client = anthropic.Anthropic(api_key=api_key)\n self.model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請直接選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請直接選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請直接選出正確的答案。\",\n }\n ]\n\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請直接選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"user\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"assistant\":\n text += anthropic.AI_PROMPT + \" \" + prompt[\"content\"]\n text += anthropic.AI_PROMPT\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n prompt=text,\n stop_sequences=[anthropic.HUMAN_PROMPT],\n model=self.model_name,\n temperature=0.1,\n max_tokens_to_sample=300,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.completion\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n 
ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"正確的答案應該是:.*?\\b([A-D])\\b\",\n r\"正確的選項應為:.*?\\b([A-D])\\b\",\n r\"所以答案為([A-D])\",\n r\"答案為\\s?([A-D])\",\n r\"所以下列方程式的解是([A-D])\",\n r\"选([A-D])\",\n r\"选项([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str, re.DOTALL)\n else:\n break\n return ans_list" }, { "identifier": "Azure_Evaluator", "path": "ievals/modules/qa_evaluators/azure.py", "snippet": "class Azure_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Azure_Evaluator, self).__init__(choices, model_name, k)\n self.client = AzureOpenAI(\n api_key=api_key,\n api_version=os.getenv(\"AZURE_OPENAI_VERSION\", \"2023-07-01-preview\"),\n azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n )\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n response = None\n timeout_counter = 0\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name, messages=full_prompt, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n response_str = \"\"\n if response != None:\n response_str = response.choices[0].message.content\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n if response_str is None:\n response_str = \"\"\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "GPT_Evaluator", "path": "ievals/modules/qa_evaluators/oai_complete.py", "snippet": "class GPT_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n davinci, gpt-3.5-instruct\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(GPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n model=self.model_name, prompt=text, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].text\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct 
= 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "ChatGPT_Evaluator", "path": "ievals/modules/qa_evaluators/chatgpt.py", "snippet": "class ChatGPT_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(ChatGPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter: # convert to simplified chinese\n for idx, prompt in enumerate(full_prompt):\n full_prompt[idx][\"content\"] = self.converter.convert(\n prompt[\"content\"]\n )\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name,\n messages=full_prompt,\n temperature=0.0,\n max_tokens=200,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].message.content\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if 
save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n # manually found regex which can be used to parse most of the response\n # text\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "DashScope_Evaluator", "path": "ievals/modules/qa_evaluators/ali_dashscope.py", "snippet": "class DashScope_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n qwen models\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(DashScope_Evaluator, self).__init__(choices, model_name, k)\n dashscope.api_key = api_key\n assert model_name in set(Generation.Models.__dict__.values())\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = Generation.call(model=self.model_name, prompt=text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response.status_code == HTTPStatus.OK:\n response_str = response.output.text\n else:\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "run_exp", "path": "ievals/exp_executer.py", "snippet": "def run_exp(\n evaluator,\n model_name,\n dataset,\n postfix_name=\"tgi\",\n cache_path=\".cache\",\n split_name=\"test\",\n few_shot=False,\n):\n model_name_path = model_name.replace(\"/\", \"_\")\n save_result_dir = None\n\n if cache_path:\n os.makedirs(f\"{cache_path}\", exist_ok=True)\n os.makedirs(f\"{cache_path}/{model_name_path}\", exist_ok=True)\n save_result_dir = f\"{cache_path}/{model_name_path}\"\n\n task_list, subject2name, subject2category = get_exp_setting(dataset)\n postfix = model_name.split(\"/\")[-1]\n prefix_name = dataset.split(\"/\")[-1]\n result_cache = f\"{prefix_name}_{postfix_name}.tsv\"\n if os.path.exists(result_cache):\n logging.info(f\"Found previous cache {result_cache}, skipping executed subjects\")\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n finished_subjects = df[\"subject\"].tolist()\n task_list = [t for t in task_list if t not in finished_subjects]\n\n output_filename = \"\"\n # TODO: absract out the dataset-task logic, as this is likely\n # limited under multi subject task only\n for task in task_list:\n zh_name = subject2name[task]\n test = load_dataset(dataset, task)[split_name]\n test_df = pd.DataFrame([dict(row) for row in test])\n dev = load_dataset(dataset, task)[\"train\"]\n dev_df = pd.DataFrame([dict(row) for row in dev])\n\n accuracy = evaluator.eval_subject(\n zh_name,\n test_df,\n dev_df=dev_df,\n few_shot=few_shot,\n save_result_dir=f\"{cache_path}/{model_name_path}\",\n )\n\n with open(result_cache, \"a\") as fout:\n 
fout.write(\"{}\\t{}\\t{:.5f}\\n\".format(model_name, task, accuracy))\n\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n for model_name in df[\"model_name\"].unique():\n print(model_name)" } ]
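The extract_ans helper quoted in the context above relies on a manually curated list of answer-extraction regexes; as a reading aid, a minimal check of how one of those quoted patterns behaves (an assumed illustration, not part of the record):

import re

# One of the patterns listed in extract_ans above; it captures answers phrased as "答案是 ...".
pattern = r"答案是\s?選?項?\s?([A-D])"
print(re.findall(pattern, "答案是 B。"))  # expected output: ['B']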
import os import logging import argparse import pandas as pd from datasets import load_dataset from ievals.modules.qa_evaluators.tgi import TGI_Evaluator from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator from ievals.modules.qa_evaluators.claude import Claude_Evaluator from ievals.modules.qa_evaluators.azure import Azure_Evaluator from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator from ievals.modules.qa_evaluators.hf_base import ( Qwen_Evaluator, ) # we only use this for qwen base model from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator from ievals.exp_executer import run_exp
20400
elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name: return Claude_Evaluator elif "Qwen" in model_name: if "chat" in l_model_name: return HF_Chat_Evaluator else: return Qwen_Evaluator elif "qwen" in model_name: return DashScope_Evaluator return TGI_Evaluator def get_parser(): parser = argparse.ArgumentParser(description="Run TMMLU+ evals") parser.add_argument("model", type=str, help="Name of the eval model") parser.add_argument("--series", type=str, default="") parser.add_argument("--dataset", type=str, default="ikala/tmmluplus") parser.add_argument("--choices", type=str, default="A,B,C,D") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--api_key", type=str, default=None) parser.add_argument("--max_samples", type=int, default=None) parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True) parser.add_argument( "--switch_zh_hans", action=argparse.BooleanOptionalAction, default=False ) parser.add_argument( "--ip_addr", type=str, default="", help="IP:PORT for text-generation-inference server", ) parser.add_argument("--sys_token", type=str, default="", help="system prompt token") parser.add_argument("--usr_token", type=str, default="", help="user starting token") parser.add_argument( "--ast_token", type=str, default="", help="assistant starting token" ) parser.add_argument( "--eos_token", type=str, default="", help="end-of-sentence token usually its <|endoftext|> or </s>, but you have to verify from hf model tokenizer.json", ) parser.add_argument("--hf_cache", type=str, default="", help="huggingface cache") return parser def main(): parser = get_parser() args = parser.parse_args() model_name = args.model if model_name == "supported": valid_model_names, _ = get_model_config() print(valid_model_names) exit(0) valid_choices = args.choices.split(",") eval_cls = get_evaluator(model_name, args.series) if "TGI" in str(eval_cls): if len(args.usr_token): prompt_config = { "systemMessageToken": args.sys_token, "userMessageToken": args.usr_token, "messageEndToken": args.eos_token, "assistantMessageToken": args.ast_token, } else: prompt_config = get_tgi_prompt_config(model_name) eval_ins = eval_cls( choices=valid_choices, k=args.top_k, ip_addr=args.ip_addr, model_name=model_name, switch_zh_hans=args.switch_zh_hans, **prompt_config, ) elif ("HF_Chat" in str(eval_cls)) or ("Qwen" in str(eval_cls)): eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) else: eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) postfix = model_name.split("/")[-1] if args.top_k > 0: postfix += f"_top_{args.top_k}" cache_path = None if args.cache: cache_path = ".cache" if args.top_k > 0: cache_path += f"_top_{args.top_k}"
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name: return Claude_Evaluator elif "Qwen" in model_name: if "chat" in l_model_name: return HF_Chat_Evaluator else: return Qwen_Evaluator elif "qwen" in model_name: return DashScope_Evaluator return TGI_Evaluator def get_parser(): parser = argparse.ArgumentParser(description="Run TMMLU+ evals") parser.add_argument("model", type=str, help="Name of the eval model") parser.add_argument("--series", type=str, default="") parser.add_argument("--dataset", type=str, default="ikala/tmmluplus") parser.add_argument("--choices", type=str, default="A,B,C,D") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--api_key", type=str, default=None) parser.add_argument("--max_samples", type=int, default=None) parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True) parser.add_argument( "--switch_zh_hans", action=argparse.BooleanOptionalAction, default=False ) parser.add_argument( "--ip_addr", type=str, default="", help="IP:PORT for text-generation-inference server", ) parser.add_argument("--sys_token", type=str, default="", help="system prompt token") parser.add_argument("--usr_token", type=str, default="", help="user starting token") parser.add_argument( "--ast_token", type=str, default="", help="assistant starting token" ) parser.add_argument( "--eos_token", type=str, default="", help="end-of-sentence token usually its <|endoftext|> or </s>, but you have to verify from hf model tokenizer.json", ) parser.add_argument("--hf_cache", type=str, default="", help="huggingface cache") return parser def main(): parser = get_parser() args = parser.parse_args() model_name = args.model if model_name == "supported": valid_model_names, _ = get_model_config() print(valid_model_names) exit(0) valid_choices = args.choices.split(",") eval_cls = get_evaluator(model_name, args.series) if "TGI" in str(eval_cls): if len(args.usr_token): prompt_config = { "systemMessageToken": args.sys_token, 
"userMessageToken": args.usr_token, "messageEndToken": args.eos_token, "assistantMessageToken": args.ast_token, } else: prompt_config = get_tgi_prompt_config(model_name) eval_ins = eval_cls( choices=valid_choices, k=args.top_k, ip_addr=args.ip_addr, model_name=model_name, switch_zh_hans=args.switch_zh_hans, **prompt_config, ) elif ("HF_Chat" in str(eval_cls)) or ("Qwen" in str(eval_cls)): eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) else: eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) postfix = model_name.split("/")[-1] if args.top_k > 0: postfix += f"_top_{args.top_k}" cache_path = None if args.cache: cache_path = ".cache" if args.top_k > 0: cache_path += f"_top_{args.top_k}"
run_exp(
7
2023-12-24 08:00:38+00:00
24k
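The gold continuation for the record above is the call to run_exp; a minimal sketch of how that call could plausibly continue inside main(), assuming only the run_exp signature quoted in this record's context (the concrete arguments below are assumptions, not the gold code):

# Hypothetical continuation of main() after the cache_path setup; only "run_exp(" is the gold next line.
run_exp(
    eval_ins,                 # evaluator instance constructed above
    model_name,               # args.model
    args.dataset,             # defaults to "ikala/tmmluplus"
    postfix_name=postfix,     # model-name suffix, extended with top_k when set
    cache_path=cache_path,    # ".cache" (optionally with a top_k suffix) when --cache is enabled
)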
kraina-ai/quackosm
tests/test_pbf_file_reader.py
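The context entries below quote quackosm's PbfFileReader; as a reading aid, a minimal usage sketch assuming only the constructor and get_features_gdf signatures described in those docstrings (the PBF file name is a placeholder):

# Illustrative only; argument names follow the PbfFileReader docstrings quoted below.
from quackosm.pbf_file_reader import PbfFileReader

reader = PbfFileReader(
    tags_filter={"leisure": "park", "shop": ["bakery", "bicycle"]},  # example filter from the docstring
    geometry_filter=None,       # optional shapely geometry used to clip features
    working_directory="files",
)
# Returns a GeoDataFrame of OSM features indexed by feature_id ("node/<id>", "way/<id>", ...).
gdf = reader.get_features_gdf(
    "example.osm.pbf",          # placeholder PBF path
    explode_tags=True,          # one column per OSM tag key
    ignore_cache=False,
)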
[ { "identifier": "FEATURES_INDEX", "path": "quackosm/_constants.py", "snippet": "FEATURES_INDEX = \"feature_id\"" }, { "identifier": "OsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[OsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[GroupedOsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(\n osm_tags_filter: Union[\n OsmTagsFilter, GroupedOsmTagsFilter, Iterable[OsmTagsFilter], Iterable[GroupedOsmTagsFilter]\n ]\n) -> OsmTagsFilter:\ndef _merge_grouped_osm_tags_filter(grouped_filter: GroupedOsmTagsFilter) -> OsmTagsFilter:\ndef _merge_multiple_osm_tags_filters(osm_tags_filters: Iterable[OsmTagsFilter]) -> OsmTagsFilter:" }, { "identifier": "PbfFileReader", "path": "quackosm/pbf_file_reader.py", "snippet": "class PbfFileReader:\n \"\"\"\n PbfFileReader.\n\n PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader\n class based on DuckDB[2] and its spatial extension[3].\n\n Handler can filter out OSM features based on tags filter and geometry filter\n to limit the result.\n\n References:\n 1. https://wiki.openstreetmap.org/wiki/PBF_Format\n 2. https://duckdb.org/\n 3. https://github.com/duckdb/duckdb_spatial\n \"\"\"\n\n class ConvertedOSMParquetFiles(NamedTuple):\n \"\"\"List of parquet files read from the `*.osm.pbf` file.\"\"\"\n\n nodes_valid_with_tags: \"duckdb.DuckDBPyRelation\"\n nodes_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n ways_all_with_tags: \"duckdb.DuckDBPyRelation\"\n ways_with_unnested_nodes_refs: \"duckdb.DuckDBPyRelation\"\n ways_required_ids: \"duckdb.DuckDBPyRelation\"\n ways_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n relations_all_with_tags: \"duckdb.DuckDBPyRelation\"\n relations_with_unnested_way_refs: \"duckdb.DuckDBPyRelation\"\n relations_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n class ParsedOSMFeatures(NamedTuple):\n \"\"\"Final list of parsed features from the `*.osm.pbf` file.\"\"\"\n\n nodes: \"duckdb.DuckDBPyRelation\"\n ways: \"duckdb.DuckDBPyRelation\"\n relations: \"duckdb.DuckDBPyRelation\"\n\n def __init__(\n self,\n tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,\n geometry_filter: Optional[BaseGeometry] = None,\n working_directory: Union[str, Path] = \"files\",\n osm_way_polygon_features_config: Optional[\n Union[OsmWayPolygonConfig, dict[str, Any]]\n ] = None,\n ) -> None:\n \"\"\"\n Initialize PbfFileReader.\n\n Args:\n tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary\n specifying which tags to download.\n The keys should be OSM tags (e.g. `building`, `amenity`).\n The values should either be `True` for retrieving all objects with the tag,\n string for retrieving a single tag-value pair\n or list of strings for retrieving all values specified in the list.\n `tags={'leisure': 'park}` would return parks from the area.\n `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}`\n would return parks, all amenity types, bakeries and bicycle shops.\n If `None`, handler will allow all of the tags to be parsed. Defaults to `None`.\n geometry_filter (BaseGeometry, optional): Region which can be used to filter only\n intersecting OSM objects. 
Defaults to `None`.\n working_directory (Union[str, Path], optional): Directory where to save\n the parsed `*.parquet` files. Defaults to \"files\".\n osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional):\n Config used to determine which closed way features are polygons.\n Modifications to this config left are left for experienced OSM users.\n Defaults to predefined \"osm_way_polygon_features.json\".\n \"\"\"\n self.tags_filter = tags_filter\n self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None\n self.geometry_filter = geometry_filter\n self.working_directory = Path(working_directory)\n self.working_directory.mkdir(parents=True, exist_ok=True)\n self.connection: duckdb.DuckDBPyConnection = None\n\n self.rows_per_bucket = 1_000_000\n memory = psutil.virtual_memory()\n # If less than 8 / 16 GB total memory, reduce number of rows per group\n if memory.total < (8 * (1024**3)):\n self.rows_per_bucket = 100_000\n elif memory.total < (16 * (1024**3)):\n self.rows_per_bucket = 500_000\n\n if osm_way_polygon_features_config is None:\n # Config based on two sources + manual OSM wiki check\n # 1. https://github.com/tyrasd/osm-polygon-features/blob/v0.9.2/polygon-features.json\n # 2. https://github.com/ideditor/id-area-keys/blob/v5.0.1/areaKeys.json\n osm_way_polygon_features_config = json.loads(\n (Path(__file__).parent / \"osm_way_polygon_features.json\").read_text()\n )\n\n self.osm_way_polygon_features_config: OsmWayPolygonConfig = (\n osm_way_polygon_features_config\n if isinstance(osm_way_polygon_features_config, OsmWayPolygonConfig)\n else parse_dict_to_config_object(osm_way_polygon_features_config)\n )\n\n def get_features_gdf(\n self,\n file_paths: Union[str, Path, Iterable[Union[str, Path]]],\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> gpd.GeoDataFrame:\n \"\"\"\n Get features GeoDataFrame from a list of PBF files.\n\n Function parses multiple PBF files and returns a single GeoDataFrame with parsed\n OSM objects.\n\n Args:\n file_paths (Union[str, Path, Iterable[Union[str, Path]]]):\n Path or list of paths of `*.osm.pbf` files to be parsed.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache: (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n gpd.GeoDataFrame: GeoDataFrame with OSM features.\n \"\"\"\n if isinstance(file_paths, (str, Path)):\n file_paths = [file_paths]\n\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n parsed_geoparquet_files = []\n for file_path in file_paths:\n parsed_geoparquet_file = self.convert_pbf_to_gpq(\n file_path,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n filter_osm_ids=filter_osm_ids,\n )\n parsed_geoparquet_files.append(parsed_geoparquet_file)\n\n parquet_tables = [\n io.read_geoparquet_table(parsed_parquet_file) # type: ignore\n for parsed_parquet_file in parsed_geoparquet_files\n ]\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n gdf_parquet = gpd.GeoDataFrame(\n data=joined_parquet_table.drop(GEOMETRY_COLUMN).to_pandas(maps_as_pydicts=\"strict\"),\n geometry=ga.to_geopandas(joined_parquet_table.column(GEOMETRY_COLUMN)),\n ).set_index(FEATURES_INDEX)\n\n return gdf_parquet\n\n def convert_pbf_to_gpq(\n self,\n pbf_path: Union[str, Path],\n result_file_path: Optional[Union[str, Path]] = None,\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> Path:\n \"\"\"\n Convert PBF file to GeoParquet file.\n\n Args:\n pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet.\n result_file_path (Union[str, Path], optional): Where to save\n the geoparquet file. If not provided, will be generated based on hashes\n from provided tags filter and geometry filter. Defaults to `None`.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n Path: Path to the generated GeoParquet file.\n \"\"\"\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n with tempfile.TemporaryDirectory(dir=self.working_directory.resolve()) as tmp_dir_name:\n try:\n self._set_up_duckdb_connection(tmp_dir_name)\n result_file_path = result_file_path or self._generate_geoparquet_result_file_path(\n pbf_path,\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n )\n parsed_geoparquet_file = self._parse_pbf_file(\n pbf_path=pbf_path,\n tmp_dir_name=tmp_dir_name,\n result_file_path=Path(result_file_path),\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n )\n return parsed_geoparquet_file\n finally:\n if self.connection is not None:\n self.connection.close()\n self.connection = None\n\n def _set_up_duckdb_connection(self, tmp_dir_name: str) -> None:\n self.connection = duckdb.connect(\n database=str(Path(tmp_dir_name) / \"db.duckdb\"),\n config=dict(preserve_insertion_order=False),\n )\n for extension_name in (\"parquet\", \"spatial\"):\n self.connection.install_extension(extension_name)\n self.connection.load_extension(extension_name)\n\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_linestring_wkt(ls) AS\n 'LINESTRING (' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || ')';\n \"\"\")\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_polygon_wkt(ls) AS\n 'POLYGON ((' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || '))';\n \"\"\")\n\n def _parse_pbf_file(\n self,\n pbf_path: Union[str, Path],\n tmp_dir_name: str,\n result_file_path: Path,\n filter_osm_ids: list[str],\n explode_tags: bool = True,\n ignore_cache: bool = False,\n ) -> Path:\n if not result_file_path.exists() or ignore_cache:\n elements = self.connection.sql(f\"SELECT * FROM ST_READOSM('{Path(pbf_path)}');\")\n converted_osm_parquet_files = self._prefilter_elements_ids(\n elements, tmp_dir_name, filter_osm_ids\n )\n\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_filtered_non_distinct_ids\",\n \"nodes_prepared_ids\",\n \"ways_valid_ids\",\n \"ways_filtered_non_distinct_ids\",\n \"relations_valid_ids\",\n \"relations_ids\",\n ],\n )\n\n filtered_nodes_with_geometry = self._get_filtered_nodes_with_geometry(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(tmp_dir_name, \"nodes_filtered_ids\")\n\n ways_refs_with_nodes_structs = self._get_ways_refs_with_nodes_structs(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_valid_with_tags\",\n ],\n )\n\n filtered_ways_with_linestrings = self._get_filtered_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n required_ways_with_linestrings = self._get_required_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_required_grouped\",\n \"ways_required_ids\",\n 
\"ways_with_unnested_nodes_refs\",\n \"ways_refs_with_nodes_structs\",\n \"required_ways_ids_grouped\",\n \"required_ways_grouped\",\n \"required_ways_tmp\",\n \"filtered_ways_ids_grouped\",\n \"filtered_ways_grouped\",\n \"filtered_ways_tmp\",\n ],\n )\n\n filtered_ways_with_proper_geometry = self._get_filtered_ways_with_proper_geometry(\n converted_osm_parquet_files, filtered_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_prepared_ids\",\n \"ways_filtered_ids\",\n \"ways_all_with_tags\",\n \"filtered_ways_with_linestrings\",\n ],\n )\n\n filtered_relations_with_geometry = self._get_filtered_relations_with_geometry(\n converted_osm_parquet_files, required_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"relations_all_with_tags\",\n \"relations_with_unnested_way_refs\",\n \"relations_filtered_ids\",\n \"required_ways_with_linestrings\",\n \"valid_relation_parts\",\n \"relation_inner_parts\",\n \"relation_outer_parts\",\n \"relation_outer_parts_with_holes\",\n \"relation_outer_parts_without_holes\",\n ],\n )\n\n self._concatenate_results_to_geoparquet(\n PbfFileReader.ParsedOSMFeatures(\n nodes=filtered_nodes_with_geometry,\n ways=filtered_ways_with_proper_geometry,\n relations=filtered_relations_with_geometry,\n ),\n tmp_dir_name=tmp_dir_name,\n save_file_path=result_file_path,\n explode_tags=explode_tags,\n )\n\n return result_file_path\n\n def _generate_geoparquet_result_file_path(\n self,\n pbf_file_path: Union[str, Path],\n explode_tags: bool,\n filter_osm_ids: list[str],\n ) -> Path:\n pbf_file_name = Path(pbf_file_path).name.removesuffix(\".osm.pbf\")\n\n osm_filter_tags_hash_part = \"nofilter\"\n if self.tags_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(self.tags_filter).encode())\n osm_filter_tags_hash_part = h.hexdigest()\n\n clipping_geometry_hash_part = \"noclip\"\n if self.geometry_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(wktlib.dumps(self.geometry_filter).encode())\n clipping_geometry_hash_part = h.hexdigest()\n\n exploded_tags_part = \"exploded\" if explode_tags else \"compact\"\n\n filter_osm_ids_hash_part = \"\"\n if filter_osm_ids:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(sorted(set(filter_osm_ids))).encode())\n filter_osm_ids_hash_part = f\"_{h.hexdigest()}\"\n\n result_file_name = (\n f\"{pbf_file_name}_{osm_filter_tags_hash_part}\"\n f\"_{clipping_geometry_hash_part}_{exploded_tags_part}{filter_osm_ids_hash_part}.geoparquet\"\n )\n return Path(self.working_directory) / result_file_name\n\n def _prefilter_elements_ids(\n self, elements: \"duckdb.DuckDBPyRelation\", tmp_dir_name: str, filter_osm_ids: list[str]\n ) -> ConvertedOSMParquetFiles:\n sql_filter = self._generate_osm_tags_sql_filter()\n filtered_tags_clause = self._generate_filtered_tags_clause()\n\n is_intersecting = self.geometry_filter is not None\n\n with TaskProgressSpinner(\"Reading nodes\", \"1\"):\n # NODES - VALID (NV)\n # - select all with kind = 'node'\n # - select all with lat and lon not empty\n nodes_valid_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT\n id,\n {filtered_tags_clause},\n lon,\n lat\n FROM ({elements.sql_query()})\n WHERE kind = 'node'\n AND lat IS NOT NULL AND lon IS NOT NULL\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_valid_with_tags\",\n )\n # NODES - INTERSECTING (NI)\n # - select all from NV which intersect given geometry filter\n # NODES - FILTERED (NF)\n # - select all from NI with tags filter\n 
filter_osm_node_ids_filter = self._generate_elements_filter(filter_osm_ids, \"node\")\n if is_intersecting:\n wkt = cast(BaseGeometry, self.geometry_filter).wkt\n intersection_filter = f\"ST_Intersects(ST_Point(lon, lat), ST_GeomFromText('{wkt}'))\"\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n nodes_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE {intersection_filter} = true\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_intersecting_ids\",\n )\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) ni ON n.id = ni.id\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n else:\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n pass\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n nodes_intersecting_ids = nodes_valid_with_tags\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n with TaskProgressSpinner(\"Calculating distinct filtered nodes ids\", \"4\"):\n nodes_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"nodes_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading ways\", \"5\"):\n # WAYS - VALID (WV)\n # - select all with kind = 'way'\n # - select all with more then one ref\n # - join all NV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()}) w\n WHERE kind = 'way' AND len(refs) >= 2\n \"\"\").to_view(\"ways\", replace=True)\n ways_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}, tags as raw_tags\n FROM ways w\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags, raw_tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_all_with_tags\",\n )\n with TaskProgressSpinner(\"Unnesting ways\", \"6\"):\n ways_with_unnested_nodes_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n SELECT w.id, UNNEST(refs) as ref, UNNEST(range(length(refs))) as ref_idx\n FROM ways w\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_with_unnested_nodes_refs\",\n )\n with TaskProgressSpinner(\"Filtering ways - valid refs\", \"7\"):\n ways_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()})\n ),\n unmatched_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) w\n ANTI JOIN ({nodes_valid_with_tags.sql_query()}) nv ON nv.id = w.ref\n )\n SELECT DISTINCT id\n FROM total_ways_with_nodes_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_ways_with_nodes_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering ways - intersection\", \"8\"):\n # 
WAYS - INTERSECTING (WI)\n # - select all from WV with joining any from NV on ref\n if is_intersecting:\n ways_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT uwr.id\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) uwr\n SEMI JOIN ({ways_valid_ids.sql_query()}) wv ON uwr.id = wv.id\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) n ON n.id = uwr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_intersecting_ids\",\n )\n else:\n ways_intersecting_ids = ways_valid_ids\n with TaskProgressSpinner(\"Filtering ways - tags\", \"9\"):\n # WAYS - FILTERED (WF)\n # - select all from WI with tags filter\n filter_osm_way_ids_filter = self._generate_elements_filter(filter_osm_ids, \"way\")\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({ways_all_with_tags.sql_query()}) w\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON w.id = wi.id\n WHERE ({sql_filter}) AND ({filter_osm_way_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered ways ids\", \"10\"):\n ways_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"ways_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading relations\", \"11\"):\n # RELATIONS - VALID (RV)\n # - select all with kind = 'relation'\n # - select all with more then one ref\n # - select all with type in ['boundary', 'multipolygon']\n # - join all WV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()})\n WHERE kind = 'relation' AND len(refs) > 0\n AND list_contains(map_keys(tags), 'type')\n AND list_has_any(map_extract(tags, 'type'), ['boundary', 'multipolygon'])\n \"\"\").to_view(\"relations\", replace=True)\n relations_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}\n FROM relations r\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_all_with_tags\",\n )\n\n with TaskProgressSpinner(\"Unnesting relations\", \"12\"):\n relations_with_unnested_way_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n WITH unnested_relation_refs AS (\n SELECT\n r.id,\n UNNEST(refs) as ref,\n UNNEST(ref_types) as ref_type,\n UNNEST(ref_roles) as ref_role,\n UNNEST(range(length(refs))) as ref_idx\n FROM relations r\n )\n SELECT id, ref, ref_role, ref_idx\n FROM unnested_relation_refs\n WHERE ref_type = 'way'\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_with_unnested_way_refs\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - valid refs\", \"13\"):\n relations_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n ),\n unmatched_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) r\n ANTI JOIN ({ways_valid_ids.sql_query()}) wv ON wv.id = r.ref\n )\n SELECT DISTINCT id\n FROM total_relation_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_relation_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - intersection\", \"14\"):\n # RELATIONS - INTERSECTING (RI)\n # - select 
all from RW with joining any from RV on ref\n if is_intersecting:\n relations_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT frr.id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_valid_ids.sql_query()}) rv ON frr.id = rv.id\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON wi.id = frr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_intersecting_ids\",\n )\n else:\n relations_intersecting_ids = relations_valid_ids\n\n with TaskProgressSpinner(\"Filtering relations - tags\", \"15\"):\n # RELATIONS - FILTERED (RF)\n # - select all from RI with tags filter\n filter_osm_relation_ids_filter = self._generate_elements_filter(\n filter_osm_ids, \"relation\"\n )\n\n relations_ids_path = Path(tmp_dir_name) / \"relations_ids\"\n relations_ids_path.mkdir(parents=True, exist_ok=True)\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({relations_all_with_tags.sql_query()}) r\n SEMI JOIN ({relations_intersecting_ids.sql_query()}) ri ON r.id = ri.id\n WHERE ({sql_filter}) AND ({filter_osm_relation_ids_filter})\n \"\"\",\n file_path=relations_ids_path / \"filtered\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered relations ids\", \"16\"):\n relations_filtered_ids = self._calculate_unique_ids_to_parquet(\n relations_ids_path / \"filtered\", Path(tmp_dir_name) / \"relations_filtered_ids\"\n )\n\n ways_prepared_ids_path = Path(tmp_dir_name) / \"ways_prepared_ids\"\n ways_prepared_ids_path.mkdir(parents=True, exist_ok=True)\n\n with TaskProgressSpinner(\"Loading required ways - by relations\", \"17\"):\n # WAYS - REQUIRED (WR)\n # - required - all IDs from WF\n # + all needed to construct relations from RF\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT ref as id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_filtered_ids.sql_query()}) fri ON fri.id = frr.id\n \"\"\",\n file_path=ways_prepared_ids_path / \"required_by_relations\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct required ways ids\", \"18\"):\n ways_required_ids = self._calculate_unique_ids_to_parquet(\n ways_prepared_ids_path, Path(tmp_dir_name) / \"ways_required_ids\"\n )\n\n return PbfFileReader.ConvertedOSMParquetFiles(\n nodes_valid_with_tags=nodes_valid_with_tags,\n nodes_filtered_ids=nodes_filtered_ids,\n ways_all_with_tags=ways_all_with_tags,\n ways_with_unnested_nodes_refs=ways_with_unnested_nodes_refs,\n ways_required_ids=ways_required_ids,\n ways_filtered_ids=ways_filtered_ids,\n relations_all_with_tags=relations_all_with_tags,\n relations_with_unnested_way_refs=relations_with_unnested_way_refs,\n relations_filtered_ids=relations_filtered_ids,\n )\n\n def _delete_directories(\n self, tmp_dir_name: Union[Path, str], directories: Union[str, list[str]]\n ) -> None:\n if isinstance(directories, str):\n directories = [directories]\n for directory in directories:\n directory_path = Path(tmp_dir_name) / directory\n if not directory_path.exists():\n continue\n shutil.rmtree(directory_path)\n\n def _generate_osm_tags_sql_filter(self) -> str:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n filter_clauses = [\"(1=1)\"]\n\n if self.merged_tags_filter:\n filter_clauses.clear()\n\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_clauses.append(f\"(list_contains(map_keys(tags), '{filter_tag_key}'))\")\n elif isinstance(filter_tag_value, str):\n escaped_value = 
self._sql_escape(filter_tag_value)\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) =\"\n f\" '{escaped_value}'\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) IN\"\n f\" ({', '.join(values_list)})\"\n )\n\n return \" OR \".join(filter_clauses)\n\n def _generate_filtered_tags_clause(self) -> str:\n \"\"\"Prepare filtered tags clause by removing tags commonly ignored by OGR.\"\"\"\n tags_to_ignore = [\n \"area\",\n \"created_by\",\n \"converted_by\",\n \"source\",\n \"time\",\n \"ele\",\n \"note\",\n \"todo\",\n \"fixme\",\n \"FIXME\",\n \"openGeoDB:\",\n ]\n escaped_tags_to_ignore = [f\"'{tag}'\" for tag in tags_to_ignore]\n\n return f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if not tag_entry.key in ({','.join(escaped_tags_to_ignore)})\n and not starts_with(tag_entry.key, 'openGeoDB:')\n ]\n ) as tags\n \"\"\"\n\n def _generate_elements_filter(\n self, filter_osm_ids: list[str], element_type: Literal[\"node\", \"way\", \"relation\"]\n ) -> str:\n filter_osm_relation_ids = [\n osm_id.replace(f\"{element_type}/\", \"\")\n for osm_id in filter_osm_ids\n if osm_id.startswith(f\"{element_type}/\")\n ]\n if not filter_osm_ids:\n filter_osm_ids_filter = \"1=1\"\n elif filter_osm_relation_ids:\n filter_osm_ids_filter = f\"id in ({','.join(filter_osm_relation_ids)})\"\n else:\n filter_osm_ids_filter = \"id IS NULL\"\n\n return filter_osm_ids_filter\n\n def _sql_escape(self, value: str) -> str:\n \"\"\"Escape value for SQL query.\"\"\"\n return value.replace(\"'\", \"''\")\n\n def _sql_to_parquet_file(self, sql_query: str, file_path: Path) -> \"duckdb.DuckDBPyRelation\":\n relation = self.connection.sql(sql_query)\n return self._save_parquet_file(relation, file_path)\n\n def _save_parquet_file(\n self, relation: \"duckdb.DuckDBPyRelation\", file_path: Path\n ) -> \"duckdb.DuckDBPyRelation\":\n self.connection.sql(f\"\"\"\n COPY (\n SELECT * FROM ({relation.sql_query()})\n ) TO '{file_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _calculate_unique_ids_to_parquet(\n self, file_path: Path, result_path: Optional[Path] = None\n ) -> \"duckdb.DuckDBPyRelation\":\n if result_path is None:\n result_path = file_path / \"distinct\"\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT id FROM read_parquet('{file_path}/**') GROUP BY id\n ) TO '{result_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{result_path}/**')\n \"\"\")\n\n def _get_filtered_nodes_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n nodes_with_geometry = self.connection.sql(f\"\"\"\n SELECT\n n.id,\n n.tags,\n ST_Point(round(n.lon, 7), round(n.lat, 7)) geometry\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({osm_parquet_files.nodes_filtered_ids.sql_query()}) fn ON n.id = fn.id\n \"\"\")\n nodes_parquet = self._save_parquet_file_with_geometry(\n relation=nodes_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_nodes_with_geometry\",\n step_name=\"Saving filtered nodes with geometries\",\n step_number=\"19\",\n )\n return 
nodes_parquet\n\n def _get_ways_refs_with_nodes_structs(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n ways_refs_with_nodes_structs = self.connection.sql(f\"\"\"\n SELECT\n w.id,\n w.ref,\n w.ref_idx,\n struct_pack(x := round(n.lon, 7), y := round(n.lat, 7))::POINT_2D point\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n JOIN ({osm_parquet_files.ways_with_unnested_nodes_refs.sql_query()}) w ON w.ref = n.id\n \"\"\")\n with TaskProgressSpinner(\"Saving required nodes with structs\", \"20\"):\n ways_refs_parquet = self._save_parquet_file(\n relation=ways_refs_with_nodes_structs,\n file_path=Path(tmp_dir_name) / \"ways_refs_with_nodes_structs\",\n )\n return ways_refs_parquet\n\n def _get_filtered_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"filtered_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"filtered_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"filtered_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping filtered ways\", \"21\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_filtered_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving filtered ways with linestrings\", \"22\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _get_required_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"required_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"required_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"required_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping required ways\", \"23\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_required_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving required ways with linestrings\", \"24\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _group_ways(\n self,\n ways_ids: \"duckdb.DuckDBPyRelation\",\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n destination_dir_path: Path,\n grouped_ways_tmp_path: Path,\n grouped_ways_path: Path,\n ) -> int:\n total_required_ways = ways_ids.count(\"id\").fetchone()[0]\n\n destination_dir_path.mkdir(parents=True, exist_ok=True)\n grouped_ways_tmp_path.mkdir(parents=True, exist_ok=True)\n\n if total_required_ways == 0:\n empty_file_path = 
str(destination_dir_path / \"empty.parquet\")\n self.connection.sql(\"CREATE OR REPLACE TABLE x(id STRING, linestring LINESTRING_2D);\")\n self.connection.table(\"x\").to_parquet(empty_file_path)\n return -1\n\n groups = int(floor(total_required_ways / self.rows_per_bucket))\n\n ways_ids_grouped_relation = self.connection.sql(f\"\"\"\n SELECT id,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({ways_ids.sql_query()})\n \"\"\")\n grouped_ways_ids_with_group_path = grouped_ways_tmp_path / \"ids_with_group\"\n ways_ids_grouped_relation_parquet = self._save_parquet_file(\n relation=ways_ids_grouped_relation, file_path=grouped_ways_ids_with_group_path\n )\n\n ways_with_nodes_points_relation = self.connection.sql(f\"\"\"\n SELECT\n w.id, w.point, w.ref_idx, rw.\"group\"\n FROM ({ways_ids_grouped_relation_parquet.sql_query()}) rw\n JOIN ({ways_refs_with_nodes_structs.sql_query()}) w\n ON rw.id = w.id\n \"\"\")\n\n grouped_ways_ids_with_points_path = grouped_ways_tmp_path / \"ids_with_points\"\n ways_with_nodes_points_relation_parquet = self._save_parquet_file(\n relation=ways_with_nodes_points_relation, file_path=grouped_ways_ids_with_points_path\n )\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n id, point, ref_idx, \"group\"\n FROM ({ways_with_nodes_points_relation_parquet.sql_query()}) w\n ) TO '{grouped_ways_path}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return groups\n\n def _construct_ways_linestrings(\n self,\n bar: TaskProgressBar,\n groups: int,\n destination_dir_path: Path,\n grouped_ways_path: Path,\n ) -> None:\n grouped_ways_path.mkdir(parents=True, exist_ok=True)\n\n for group in bar.track(range(groups + 1)):\n current_ways_group_path = grouped_ways_path / f\"group={group}\"\n current_ways_group_relation = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{current_ways_group_path}/**')\n \"\"\")\n\n ways_with_linestrings = self.connection.sql(f\"\"\"\n SELECT id, list(point ORDER BY ref_idx ASC)::LINESTRING_2D linestring\n FROM ({current_ways_group_relation.sql_query()})\n GROUP BY id\n \"\"\")\n self._save_parquet_file(\n relation=ways_with_linestrings,\n file_path=destination_dir_path / f\"group={group}\",\n )\n\n def _get_filtered_ways_with_proper_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n osm_way_polygon_features_filter_clauses = [\n \"list_contains(map_keys(raw_tags), 'area') AND \"\n \"list_extract(map_extract(raw_tags, 'area'), 1) = 'yes'\"\n ]\n\n for osm_tag_key in self.osm_way_polygon_features_config.all:\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}')\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.allowlist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.denylist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND 
NOT\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n ways_with_proper_geometry = self.connection.sql(f\"\"\"\n WITH required_ways_with_linestrings AS (\n SELECT\n w.id,\n w.tags,\n w_l.linestring,\n -- Filter below is based on `_is_closed_way_a_polygon` function from OSMnx\n -- Filter values are built dynamically from a config.\n (\n -- if first and last nodes are the same\n ST_Equals(linestring[1]::POINT_2D, linestring[-1]::POINT_2D)\n -- if the element doesn't have any tags leave it as a Linestring\n AND raw_tags IS NOT NULL\n -- if the element is specifically tagged 'area':'no' -> LineString\n AND NOT (\n list_contains(map_keys(raw_tags), 'area')\n AND list_extract(map_extract(raw_tags, 'area'), 1) = 'no'\n )\n AND ({' OR '.join(osm_way_polygon_features_filter_clauses)})\n ) AS is_polygon\n FROM ({required_ways_with_linestrings.sql_query()}) w_l\n SEMI JOIN ({osm_parquet_files.ways_filtered_ids.sql_query()}) fw ON w_l.id = fw.id\n JOIN ({osm_parquet_files.ways_all_with_tags.sql_query()}) w ON w.id = w_l.id\n ),\n proper_geometries AS (\n SELECT\n id,\n tags,\n (CASE\n WHEN is_polygon\n THEN linestring_to_polygon_wkt(linestring)\n ELSE linestring_to_linestring_wkt(linestring)\n END)::GEOMETRY AS geometry\n FROM\n required_ways_with_linestrings w\n )\n SELECT id, tags, geometry FROM proper_geometries\n \"\"\")\n ways_parquet = self._save_parquet_file_with_geometry(\n relation=ways_with_proper_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_ways_with_geometry\",\n step_name=\"Saving filtered ways with geometries\",\n step_number=\"25\",\n )\n return ways_parquet\n\n def _get_filtered_relations_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n valid_relation_parts = self.connection.sql(f\"\"\"\n WITH unnested_relations AS (\n SELECT\n r.id,\n COALESCE(r.ref_role, 'outer') as ref_role,\n r.ref,\n linestring_to_linestring_wkt(w.linestring)::GEOMETRY as geometry\n FROM ({osm_parquet_files.relations_with_unnested_way_refs.sql_query()}) r\n SEMI JOIN ({osm_parquet_files.relations_filtered_ids.sql_query()}) fr\n ON r.id = fr.id\n JOIN ({required_ways_with_linestrings.sql_query()}) w\n ON w.id = r.ref\n ORDER BY r.id, r.ref_idx\n ),\n any_outer_refs AS (\n SELECT id, bool_or(ref_role == 'outer') any_outer_refs\n FROM unnested_relations\n GROUP BY id\n ),\n relations_with_geometries AS (\n SELECT\n x.id,\n CASE WHEN aor.any_outer_refs\n THEN x.ref_role ELSE 'outer'\n END as ref_role,\n x.geom geometry,\n row_number() OVER (PARTITION BY x.id) as geometry_id\n FROM (\n SELECT\n id,\n ref_role,\n UNNEST(\n ST_Dump(ST_LineMerge(ST_Collect(list(geometry)))), recursive := true\n ),\n FROM unnested_relations\n GROUP BY id, ref_role\n ) x\n JOIN any_outer_refs aor ON aor.id = x.id\n WHERE ST_NPoints(geom) >= 4\n ),\n valid_relations AS (\n SELECT id, is_valid\n FROM (\n SELECT\n id,\n bool_and(\n ST_Equals(ST_StartPoint(geometry), ST_EndPoint(geometry))\n ) is_valid\n FROM relations_with_geometries\n GROUP BY id\n )\n WHERE is_valid = true\n )\n SELECT * FROM relations_with_geometries\n SEMI JOIN valid_relations ON relations_with_geometries.id = valid_relations.id\n \"\"\")\n valid_relation_parts_parquet = self._save_parquet_file_with_geometry(\n relation=valid_relation_parts,\n file_path=Path(tmp_dir_name) / \"valid_relation_parts\",\n step_name=\"Saving valid relations parts\",\n step_number=\"26\",\n )\n 
relation_inner_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'inner'\n \"\"\")\n relation_inner_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_inner_parts,\n file_path=Path(tmp_dir_name) / \"relation_inner_parts\",\n fix_geometries=True,\n step_name=\"Saving relations inner parts\",\n step_number=\"27\",\n )\n relation_outer_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'outer'\n \"\"\")\n relation_outer_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts\",\n fix_geometries=True,\n step_name=\"Saving relations outer parts\",\n step_number=\"28\",\n )\n relation_outer_parts_with_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n ST_Difference(any_value(og.geometry), ST_Union_Agg(ig.geometry)) geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n JOIN ({relation_inner_parts_parquet.sql_query()}) ig\n ON og.id = ig.id AND ST_WITHIN(ig.geometry, og.geometry)\n GROUP BY og.id, og.geometry_id\n \"\"\")\n relation_outer_parts_with_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_with_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_with_holes\",\n step_name=\"Saving relations outer parts with holes\",\n step_number=\"29\",\n )\n relation_outer_parts_without_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n og.geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n ANTI JOIN ({relation_outer_parts_with_holes_parquet.sql_query()}) ogwh\n ON og.id = ogwh.id AND og.geometry_id = ogwh.geometry_id\n \"\"\")\n relation_outer_parts_without_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_without_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_without_holes\",\n step_name=\"Saving relations outer parts without holes\",\n step_number=\"30\",\n )\n relations_with_geometry = self.connection.sql(f\"\"\"\n WITH unioned_outer_geometries AS (\n SELECT id, geometry\n FROM ({relation_outer_parts_with_holes_parquet.sql_query()})\n UNION ALL\n SELECT id, geometry\n FROM ({relation_outer_parts_without_holes_parquet.sql_query()})\n ),\n final_geometries AS (\n SELECT id, ST_Union_Agg(geometry) geometry\n FROM unioned_outer_geometries\n GROUP BY id\n )\n SELECT r_g.id, r.tags, r_g.geometry\n FROM final_geometries r_g\n JOIN ({osm_parquet_files.relations_all_with_tags.sql_query()}) r\n ON r.id = r_g.id\n \"\"\")\n relations_parquet = self._save_parquet_file_with_geometry(\n relation=relations_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_relations_with_geometry\",\n step_name=\"Saving filtered relations with geometries\",\n step_number=\"31\",\n )\n return relations_parquet\n\n def _save_parquet_file_with_geometry(\n self,\n relation: \"duckdb.DuckDBPyRelation\",\n file_path: Path,\n step_name: str,\n step_number: str,\n fix_geometries: bool = False,\n ) -> \"duckdb.DuckDBPyRelation\":\n if not fix_geometries:\n with TaskProgressSpinner(step_name, step_number):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n ) TO '{file_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 
25000\n )\n \"\"\")\n else:\n valid_path = file_path / \"valid\"\n invalid_path = file_path / \"invalid\"\n fixed_path = file_path / \"fixed\"\n\n valid_path.mkdir(parents=True, exist_ok=True)\n invalid_path.mkdir(parents=True, exist_ok=True)\n fixed_path.mkdir(parents=True, exist_ok=True)\n\n # Save valid features\n with TaskProgressSpinner(f\"{step_name} - valid geometries\", f\"{step_number}.1\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n WHERE ST_IsValid(geometry)\n ) TO '{valid_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Save invalid features\n with TaskProgressSpinner(f\"{step_name} - invalid geometries\", f\"{step_number}.2\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({relation.sql_query()})\n WHERE NOT ST_IsValid(geometry)\n ) TO '{invalid_path}' (\n FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Fix invalid features\n total_groups = 0\n while (invalid_path / f\"group={total_groups}\").exists():\n total_groups += 1\n\n if total_groups > 0:\n with TaskProgressBar(\n f\"{step_name} - fixing invalid geometries\", f\"{step_number}.3\"\n ) as bar:\n for group_id in bar.track(range(total_groups)):\n current_invalid_features_group_path = invalid_path / f\"group={group_id}\"\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid(),\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n \"geometry_wkb\", valid_geometry_column\n )\n )\n pq.write_table(\n current_invalid_features_group_table,\n fixed_path / f\"data_{group_id}.parquet\",\n )\n\n self._delete_directories(invalid_path.parent, [\"invalid\"])\n\n return self.connection.sql(f\"\"\"\n SELECT * EXCLUDE (geometry_wkb), ST_GeomFromWKB(geometry_wkb) geometry\n FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _concatenate_results_to_geoparquet(\n self,\n parsed_data: ParsedOSMFeatures,\n tmp_dir_name: str,\n save_file_path: Path,\n explode_tags: bool,\n ) -> None:\n select_clauses = [\n *self._generate_osm_tags_sql_select(parsed_data, explode_tags),\n \"geometry\",\n ]\n\n node_select_clauses = [\"'node/' || id as feature_id\", *select_clauses]\n way_select_clauses = [\"'way/' || id as feature_id\", *select_clauses]\n relation_select_clauses = [\"'relation/' || id as feature_id\", *select_clauses]\n\n unioned_features = self.connection.sql(f\"\"\"\n SELECT {', '.join(node_select_clauses)}\n FROM ({parsed_data.nodes.sql_query()}) n\n UNION ALL\n SELECT {', '.join(way_select_clauses)}\n FROM ({parsed_data.ways.sql_query()}) w\n UNION ALL\n SELECT {', '.join(relation_select_clauses)}\n FROM ({parsed_data.relations.sql_query()}) r\n \"\"\")\n\n grouped_features = self._parse_features_relation_to_groups(unioned_features, explode_tags)\n\n valid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()})\n WHERE ST_IsValid(geometry)\n \"\"\")\n\n valid_features_parquet_path = 
Path(tmp_dir_name) / \"osm_valid_elements\"\n valid_features_parquet_relation = self._save_parquet_file_with_geometry(\n valid_features_full_relation,\n valid_features_parquet_path,\n step_name=\"Saving valid features\",\n step_number=\"32.1\",\n )\n\n valid_features_parquet_table = pq.read_table(valid_features_parquet_path)\n\n is_empty = valid_features_parquet_table.num_rows == 0\n\n if not is_empty:\n geometry_column = ga.as_wkb(\n ga.with_crs(valid_features_parquet_table.column(\"geometry_wkb\"), WGS84_CRS)\n )\n else:\n geometry_column = ga.as_wkb(gpd.GeoSeries([], crs=WGS84_CRS))\n\n valid_features_parquet_table = valid_features_parquet_table.append_column(\n GEOMETRY_COLUMN, geometry_column\n ).drop(\"geometry_wkb\")\n\n parquet_tables = [valid_features_parquet_table]\n\n invalid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()}) a\n ANTI JOIN ({valid_features_parquet_relation.sql_query()}) b\n ON a.feature_id = b.feature_id\n \"\"\")\n\n total_nodes = parsed_data.nodes.count(\"id\").fetchone()[0]\n total_ways = parsed_data.ways.count(\"id\").fetchone()[0]\n total_relations = parsed_data.relations.count(\"id\").fetchone()[0]\n total_features = total_nodes + total_ways + total_relations\n\n valid_features = valid_features_parquet_relation.count(\"feature_id\").fetchone()[0]\n\n invalid_features = total_features - valid_features\n\n if invalid_features > 0:\n with TaskProgressSpinner(\"Grouping invalid features\", \"32.2\"):\n groups = floor(invalid_features / self.rows_per_bucket)\n grouped_invalid_features_result_parquet = (\n Path(tmp_dir_name) / \"osm_invalid_elements_grouped\"\n )\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({invalid_features_full_relation.sql_query()})\n ) TO '{grouped_invalid_features_result_parquet}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n with TaskProgressBar(\"Fixing invalid features\", \"32.3\") as bar:\n for group in bar.track(range(groups + 1)):\n current_invalid_features_group_path = (\n grouped_invalid_features_result_parquet / f\"group={group}\"\n )\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid()\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n GEOMETRY_COLUMN, valid_geometry_column\n )\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n parquet_tables.append(current_invalid_features_group_table)\n\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n\n is_empty = joined_parquet_table.num_rows == 0\n\n empty_columns = []\n for column_name in joined_parquet_table.column_names:\n if column_name in (FEATURES_INDEX, GEOMETRY_COLUMN):\n continue\n if (\n is_empty\n or pa.compute.all(\n pa.compute.is_null(joined_parquet_table.column(column_name))\n ).as_py()\n ):\n empty_columns.append(column_name)\n\n if empty_columns:\n joined_parquet_table = joined_parquet_table.drop(empty_columns)\n\n with TaskProgressSpinner(\"Saving final geoparquet file\", \"33\"):\n io.write_geoparquet_table( # type: ignore\n joined_parquet_table, save_file_path, 
primary_geometry_column=GEOMETRY_COLUMN\n )\n\n def _generate_osm_tags_sql_select(\n self, parsed_data: ParsedOSMFeatures, explode_tags: bool\n ) -> list[str]:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n osm_tag_keys_select_clauses = []\n\n # TODO: elif keep other tags\n if not self.merged_tags_filter and not explode_tags:\n osm_tag_keys_select_clauses = [\"tags\"]\n elif not self.merged_tags_filter and explode_tags:\n osm_tag_keys = set()\n for elements in (\n parsed_data.nodes,\n parsed_data.ways,\n parsed_data.relations,\n ):\n found_tag_keys = [row[0] for row in self.connection.sql(f\"\"\"\n SELECT DISTINCT UNNEST(map_keys(tags)) tag_key\n FROM ({elements.sql_query()})\n \"\"\").fetchall()]\n osm_tag_keys.update(found_tag_keys)\n osm_tag_keys_select_clauses = [\n f\"list_extract(map_extract(tags, '{osm_tag_key}'), 1) as \\\"{osm_tag_key}\\\"\"\n for osm_tag_key in sorted(list(osm_tag_keys))\n ]\n elif self.merged_tags_filter and not explode_tags:\n filter_tag_clauses = []\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_tag_clauses.append(f\"tag_entry.key = '{filter_tag_key}'\")\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value =\"\n f\" '{escaped_value}')\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value IN\"\n f\" ({', '.join(values_list)}))\"\n )\n osm_tag_keys_select_clauses = [f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if {\" OR \".join(filter_tag_clauses)}\n ]\n ) as tags\n \"\"\"]\n elif self.merged_tags_filter and explode_tags:\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n osm_tag_keys_select_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) as\"\n f' \"{filter_tag_key}\"'\n )\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) = '{escaped_value}'\n THEN '{escaped_value}'\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) IN ({', '.join(values_list)})\n THEN list_extract(map_extract(tags, '{filter_tag_key}'), 1)\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n\n if len(osm_tag_keys_select_clauses) > 100:\n warnings.warn(\n \"Select clause contains more than 100 columns\"\n f\" (found {len(osm_tag_keys_select_clauses)} columns).\"\n \" Query might fail with insufficient memory resources.\"\n \" Consider applying more restrictive OsmTagsFilter for parsing.\",\n stacklevel=1,\n )\n\n return osm_tag_keys_select_clauses\n\n def _parse_features_relation_to_groups(\n self,\n features_relation: \"duckdb.DuckDBPyRelation\",\n explode_tags: bool,\n ) -> \"duckdb.DuckDBPyRelation\":\n \"\"\"\n Optionally group raw OSM features into groups defined in 
`GroupedOsmTagsFilter`.\n\n Creates new features based on definition from `GroupedOsmTagsFilter`.\n Returns transformed DuckDB relation with columns based on group names from the filter.\n Values are built by concatenation of matching tag key and value with\n an equal sign (eg. amenity=parking). Since many tags can match a definition\n of a single group, a first match is used as a feature value.\n\n Args:\n features_relation (duckdb.DuckDBPyRelation): Generated features from the loader.\n explode_tags (bool): Whether to split tags into columns based on OSM tag keys.\n\n Returns:\n duckdb.DuckDBPyRelation: Parsed features_relation.\n \"\"\"\n if not self.tags_filter or not is_expected_type(self.tags_filter, GroupedOsmTagsFilter):\n return features_relation\n\n grouped_features_relation: \"duckdb.DuckDBPyRelation\"\n grouped_tags_filter = cast(GroupedOsmTagsFilter, self.tags_filter)\n\n if explode_tags:\n case_clauses = []\n for group_name in sorted(grouped_tags_filter.keys()):\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || \\\"{osm_tag_key}\\\"\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END AS \"{group_name}\"'\n case_clauses.append(case_clause)\n\n joined_case_clauses = \", \".join(case_clauses)\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {joined_case_clauses}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n else:\n case_clauses = []\n group_names = sorted(grouped_tags_filter.keys())\n for group_name in group_names:\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n element_clause = f\"element_at(tags, '{osm_tag_key}')[1]\"\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN {element_clause} IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN {element_clause} = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN {element_clause} IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || {element_clause}\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END'\n case_clauses.append(case_clause)\n\n group_names_as_sql_strings = [f\"'{group_name}'\" for group_name in group_names]\n groups_map = (\n f\"map([{', '.join(group_names_as_sql_strings)}], [{', '.join(case_clauses)}])\"\n )\n non_null_groups_map = f\"\"\"map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries({groups_map})\n if tag_entry.value IS NOT NULL\n ]\n ) 
as tags\"\"\"\n\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {non_null_groups_map}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n\n return grouped_features_relation" } ]
import platform import re import subprocess import warnings import duckdb import geopandas as gpd import pandas as pd import pyogrio import pytest import six from collections.abc import Iterable from pathlib import Path from typing import Optional, Union, cast from unittest import TestCase from parametrization import Parametrization as P from shapely import hausdorff_distance from shapely.geometry import MultiPolygon, Polygon from shapely.geometry.base import BaseGeometry from shapely.ops import unary_union from srai.geometry import remove_interiors from srai.loaders.download import download_file from srai.loaders.osm_loaders.filters import GEOFABRIK_LAYERS, HEX2VEC_FILTER from quackosm._constants import FEATURES_INDEX from quackosm._osm_tags_filters import OsmTagsFilter from quackosm.pbf_file_reader import PbfFileReader
17,849
"""Tests for PbfFileReader.""" ut = TestCase() LFS_DIRECTORY_URL = "https://github.com/kraina-ai/srai-test-files/raw/main/files/" @pytest.mark.parametrize( # type: ignore "test_file_name,query,expected_result_length,expected_features_columns_length", [ ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", None, 678, 271, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", None, 1, 22, ), ("529cdcbb7a3cc103658ef31b39bed24984e421127d319c867edf2f86ff3bb098.osm.pbf", None, 0, 0), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", HEX2VEC_FILTER, 97, 10, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", HEX2VEC_FILTER, 0, 0, ), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", GEOFABRIK_LAYERS, 433, 22, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", GEOFABRIK_LAYERS, 0, 0, ), ], ) def test_pbf_reader( test_file_name: str, query: OsmTagsFilter, expected_result_length: int, expected_features_columns_length: int, ): """Test proper files loading in `PbfFileReader`."""
"""Tests for PbfFileReader.""" ut = TestCase() LFS_DIRECTORY_URL = "https://github.com/kraina-ai/srai-test-files/raw/main/files/" @pytest.mark.parametrize( # type: ignore "test_file_name,query,expected_result_length,expected_features_columns_length", [ ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", None, 678, 271, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", None, 1, 22, ), ("529cdcbb7a3cc103658ef31b39bed24984e421127d319c867edf2f86ff3bb098.osm.pbf", None, 0, 0), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", HEX2VEC_FILTER, 97, 10, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", HEX2VEC_FILTER, 0, 0, ), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", GEOFABRIK_LAYERS, 433, 22, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", GEOFABRIK_LAYERS, 0, 0, ), ], ) def test_pbf_reader( test_file_name: str, query: OsmTagsFilter, expected_result_length: int, expected_features_columns_length: int, ): """Test proper files loading in `PbfFileReader`."""
features_gdf = PbfFileReader(tags_filter=query).get_features_gdf(
2
2023-12-28 11:26:41+00:00
24k
KyanChen/TTP
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "FilterAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class FilterAnnotations(BaseTransform):\n \"\"\"Filter invalid annotations.\n\n Required Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n\n Args:\n min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n boxes. Default: (1., 1.)\n min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n Default: 1\n by_box (bool): Filter instances with bounding boxes not meeting the\n min_gt_bbox_wh threshold. Default: True\n by_mask (bool): Filter instances with masks not meeting\n min_gt_mask_area threshold. Default: False\n keep_empty (bool): Whether to return None when it\n becomes an empty bbox after filtering. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n min_gt_mask_area: int = 1,\n by_box: bool = True,\n by_mask: bool = False,\n keep_empty: bool = True) -> None:\n # TODO: add more filter options\n assert by_box or by_mask\n self.min_gt_bbox_wh = min_gt_bbox_wh\n self.min_gt_mask_area = min_gt_mask_area\n self.by_box = by_box\n self.by_mask = by_mask\n self.keep_empty = keep_empty\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to filter annotations.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n assert 'gt_bboxes' in results\n gt_bboxes = results['gt_bboxes']\n if gt_bboxes.shape[0] == 0:\n return results\n\n tests = []\n if self.by_box:\n tests.append(\n ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n if self.by_mask:\n assert 'gt_masks' in results\n gt_masks = results['gt_masks']\n tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n keep = tests[0]\n for t in tests[1:]:\n keep = keep & t\n\n if not keep.any():\n if self.keep_empty:\n return None\n\n keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n for key in keys:\n if key in results:\n results[key] = results[key][keep]\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n f'keep_empty={self.keep_empty})'" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. 
N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> 
np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is 
not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. 
Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. 
random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class 
CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. 
If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = 
gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. 
For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. 
That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n 
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. 
Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += 
f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" }, { "identifier": "PipelineSwitchHook", "path": "mmdet/engine/hooks/pipeline_switch_hook.py", "snippet": "class PipelineSwitchHook(Hook):\n \"\"\"Switch data pipeline at switch_epoch.\n\n Args:\n switch_epoch (int): switch pipeline at this epoch.\n switch_pipeline (list[dict]): the pipeline to switch to.\n \"\"\"\n\n def __init__(self, switch_epoch, switch_pipeline):\n self.switch_epoch = switch_epoch\n self.switch_pipeline = switch_pipeline\n self._restart_dataloader = False\n self._has_switched = False\n\n def before_train_epoch(self, runner):\n \"\"\"switch pipeline.\"\"\"\n epoch = runner.epoch\n train_loader = runner.train_dataloader\n if epoch >= self.switch_epoch and not self._has_switched:\n runner.logger.info('Switch pipeline now!')\n # The dataset pipeline cannot be updated when persistent_workers\n # is True, so we need to force the dataloader's multi-process\n # restart. 
This is a very hacky approach.\n train_loader.dataset.pipeline = Compose(self.switch_pipeline)\n if hasattr(train_loader, 'persistent_workers'\n ) and train_loader.persistent_workers is True:\n train_loader._DataLoader__initialized = False\n train_loader._iterator = None\n self._restart_dataloader = True\n self._has_switched = True\n else:\n # Once the restart is complete, we need to restore\n # the initialization flag.\n if self._restart_dataloader:\n train_loader._DataLoader__initialized = True" }, { "identifier": "ExpMomentumEMA", "path": "mmdet/models/layers/ema.py", "snippet": "class ExpMomentumEMA(ExponentialMovingAverage):\n \"\"\"Exponential moving average (EMA) with exponential momentum strategy,\n which is used in YOLOX.\n\n Args:\n model (nn.Module): The model to be averaged.\n momentum (float): The momentum used for updating ema parameter.\n Ema's parameter are updated with the formula:\n `averaged_param = (1-momentum) * averaged_param + momentum *\n source_param`. Defaults to 0.0002.\n gamma (int): Use a larger momentum early in training and gradually\n annealing to a smaller value to update the ema model smoothly. The\n momentum is calculated as\n `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.\n Defaults to 2000.\n interval (int): Interval between two updates. Defaults to 1.\n device (torch.device, optional): If provided, the averaged model will\n be stored on the :attr:`device`. Defaults to None.\n update_buffers (bool): if True, it will compute running averages for\n both the parameters and the buffers of the model. Defaults to\n False.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n momentum: float = 0.0002,\n gamma: int = 2000,\n interval=1,\n device: Optional[torch.device] = None,\n update_buffers: bool = False) -> None:\n super().__init__(\n model=model,\n momentum=momentum,\n interval=interval,\n device=device,\n update_buffers=update_buffers)\n assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'\n self.gamma = gamma\n\n def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n \"\"\"Compute the moving average of the parameters using the exponential\n momentum strategy.\n\n Args:\n averaged_param (Tensor): The averaged parameters.\n source_param (Tensor): The source parameters.\n steps (int): The number of times the parameters have been\n updated.\n \"\"\"\n momentum = (1 - self.momentum) * math.exp(\n -float(1 + steps) / self.gamma) + self.momentum\n averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)" } ]
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18,215
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict( type=RandomCrop, crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True), dict(type=YOLOXHSVRandomAug),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict( type=RandomCrop, crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True), dict(type=YOLOXHSVRandomAug),
dict(type=RandomFlip, prob=0.5),
7
2023-12-23 08:36:47+00:00
24k
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, AudioVisemesLoader, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, WavLMDiscriminator, VisemesNet, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss, WavLMLoss, ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
15,281
rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If you encounter training problems, please try disabling TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # Use the simplest single-machine mode: only train the parameters of the fully-connected VisemesFCNet that maps the latent variable z to visemes global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # Generate visemes_hat from z through VisemesNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # if the last dimensions of y and x differ visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # linearly interpolate x so its shape matches y visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # Generate visemes_hat from z through VisemesFCNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # Parse environment variables envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("Loading setting {} from config".format(str(env_value))) os.environ[env_name] = str(env_value) print( "Loading environment variables \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows, switch to the gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # Parse command-line / config.yml configuration # hps = utils.get_hparams() parser = argparse.ArgumentParser() # Command-line configuration is not recommended unless necessary; please use the config.yml file parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="Dataset folder path. Note that data is no longer placed under the /logs folder by default; if you configure it via the command line, declare the path relative to the project root", default=config.dataset_path, ) parser.add_argument('--visemes', dest='visemes', action="store_true", default=False, help="train visemes only, lock the encoder and decoder") args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir set_logger(hps) if args.visemes: run_only_visemes(hps) # Check whether the paths are the same if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, 
num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) else: net_dur_disc = None if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) net_wd = WavLMDiscriminator( hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel ).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_wd = torch.optim.AdamW( net_wd.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512) if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], bucket_cap_mb=512, ) # Download the base model if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) dur_resume_lr = hps.train.learning_rate wd_resume_lr = hps.train.learning_rate if net_dur_disc is not 
None: try: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr except: print("Initialize dur_disc") try: _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************Detected an existing model, epoch: {epoch_str}, global step: {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 try: _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"), net_wd, optim_wd, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_wd.param_groups[0].get("initial_lr"): optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr except Exception as e: print(e) scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_wd = torch.optim.lr_scheduler.ExponentialLR( optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.bf16_run) wl = WavLMLoss( hps.model.slm.model, net_wd, hps.data.sampling_rate, hps.model.slm.sr, ).to(local_rank) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers 
train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
mel = spec_to_mel_torch(
16
2023-12-27 03:09:11+00:00
24k
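The record above ends its next_line field mid-call at "mel = spec_to_mel_torch(", and the spec_to_mel_torch snippet in the record's context takes (spec, n_fft, num_mels, sampling_rate, fmin, fmax). A minimal sketch of how such a call could be completed follows, purely as an illustration of that signature: only hps.data.filter_length and hps.data.sampling_rate appear elsewhere in the record's code, so the n_mel_channels, mel_fmin and mel_fmax field names are assumptions, not values taken from the record.

# Hypothetical completion, matching spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax).
# n_mel_channels / mel_fmin / mel_fmax are assumed hyperparameter names; spec and hps come from the
# surrounding training loop shown in the record.
mel = spec_to_mel_torch(
    spec,                     # linear spectrogram from the training batch
    hps.data.filter_length,   # n_fft
    hps.data.n_mel_channels,  # num_mels (assumed)
    hps.data.sampling_rate,
    hps.data.mel_fmin,        # fmin (assumed)
    hps.data.mel_fmax,        # fmax (assumed)
)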
chinhsuanwu/ifusion-threestudio
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n 
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n 
self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, 
\"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def 
requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n 
vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), 
\"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,266
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-12-27 20:30:33+00:00
24k
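The TetrahedraSDFGrid record above derives its ground-truth SDF from a reference mesh by querying pysdf and negating the result, because pysdf reports positive distances inside the surface. Below is a minimal standalone sketch of that step, assuming a single watertight mesh; the file name "mesh.obj" and the random query points are placeholders for illustration, not values taken from the record.

import numpy as np
import torch
import trimesh
from pysdf import SDF

# Load a reference mesh; "mesh.obj" is a placeholder path, not from the record.
mesh = trimesh.load("mesh.obj")

# Build the query object from vertices and faces, as in the record above.
sdf = SDF(mesh.vertices, mesh.faces)

def query_sdf(points: torch.Tensor) -> torch.Tensor:
    # Return signed distances with the usual convention (negative inside).
    # pysdf reports positive values inside the surface, so the sign is flipped,
    # mirroring the comment in the record's shape_init == "mesh:..." branch.
    values = sdf(points.detach().cpu().numpy())              # (N,) numpy array
    return torch.from_numpy(-values).to(points)[..., None]   # (N, 1) tensor

# Example: signed distances of a few random points in [-1, 1]^3.
pts = torch.rand(8, 3) * 2.0 - 1.0
print(query_sdf(pts))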
open-mmlab/Amphion
modules/wenet_extractor/squeezeformer/encoder.py
[ { "identifier": "DepthwiseConv2dSubsampling4", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class DepthwiseConv2dSubsampling4(BaseSubsampling):\n \"\"\"Depthwise Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n pos_enc_class (nn.Module): position encoding class.\n dw_stride (int): Whether do depthwise convolution.\n input_size (int): filter bank dimension.\n\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n odim: int,\n pos_enc_class: torch.nn.Module,\n dw_stride: bool = False,\n input_size: int = 80,\n input_dropout_rate: float = 0.1,\n init_weights: bool = True,\n ):\n super(DepthwiseConv2dSubsampling4, self).__init__()\n self.idim = idim\n self.odim = odim\n self.pw_conv = nn.Conv2d(\n in_channels=idim, out_channels=odim, kernel_size=3, stride=2\n )\n self.act1 = nn.ReLU()\n self.dw_conv = nn.Conv2d(\n in_channels=odim,\n out_channels=odim,\n kernel_size=3,\n stride=2,\n groups=odim if dw_stride else 1,\n )\n self.act2 = nn.ReLU()\n self.pos_enc = pos_enc_class\n self.input_proj = nn.Sequential(\n nn.Linear(odim * (((input_size - 1) // 2 - 1) // 2), odim),\n nn.Dropout(p=input_dropout_rate),\n )\n if init_weights:\n linear_max = (odim * input_size / 4) ** -0.5\n torch.nn.init.uniform_(\n self.input_proj.state_dict()[\"0.weight\"], -linear_max, linear_max\n )\n torch.nn.init.uniform_(\n self.input_proj.state_dict()[\"0.bias\"], -linear_max, linear_max\n )\n self.subsampling_rate = 4\n # 6 = (3 - 1) * 1 + (3 - 1) * 2\n self.right_context = 6\n\n def forward(\n self, x: torch.Tensor, x_mask: torch.Tensor, offset: int = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = x.unsqueeze(1) # (b, c=1, t, f)\n x = self.pw_conv(x)\n x = self.act1(x)\n x = self.dw_conv(x)\n x = self.act2(x)\n b, c, t, f = x.size()\n x = x.permute(0, 2, 1, 3)\n x = x.contiguous().view(b, t, c * f)\n x, pos_emb = self.pos_enc(x, offset)\n x = self.input_proj(x)\n return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2]" }, { "identifier": "TimeReductionLayer1D", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayer1D(nn.Module):\n \"\"\"\n Modified NeMo,\n Squeezeformer Time Reduction procedure.\n Downsamples the audio by `stride` in the time dimension.\n Args:\n channel (int): input dimension of\n MultiheadAttentionMechanism and PositionwiseFeedForward\n out_dim (int): Output dimension of the module.\n kernel_size (int): Conv kernel size for\n depthwise convolution in convolution module\n stride (int): Downsampling factor in time dimension.\n \"\"\"\n\n def __init__(\n self, channel: int, out_dim: int, kernel_size: int = 5, stride: int = 2\n ):\n super(TimeReductionLayer1D, self).__init__()\n\n self.channel = channel\n self.out_dim = out_dim\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = max(0, self.kernel_size - self.stride)\n\n self.dw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=channel,\n kernel_size=kernel_size,\n stride=stride,\n padding=self.padding,\n groups=channel,\n )\n\n self.pw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=out_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=1,\n )\n\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.channel**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n 
torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ):\n xs = xs.transpose(1, 2) # [B, C, T]\n xs = xs.masked_fill(mask_pad.eq(0), 0.0)\n\n xs = self.dw_conv(xs)\n xs = self.pw_conv(xs)\n\n xs = xs.transpose(1, 2) # [B, T, C]\n\n B, T, D = xs.size()\n mask = mask[:, :: self.stride, :: self.stride]\n mask_pad = mask_pad[:, :, :: self.stride]\n L = mask_pad.size(-1)\n # For JIT exporting, we remove F.pad operator.\n if L - T < 0:\n xs = xs[:, : L - T, :].contiguous()\n else:\n dummy_pad = torch.zeros(B, L - T, D, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "TimeReductionLayer2D", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayer2D(nn.Module):\n def __init__(self, kernel_size: int = 5, stride: int = 2, encoder_dim: int = 256):\n super(TimeReductionLayer2D, self).__init__()\n self.encoder_dim = encoder_dim\n self.kernel_size = kernel_size\n self.dw_conv = Conv2dValid(\n in_channels=encoder_dim,\n out_channels=encoder_dim,\n kernel_size=(kernel_size, 1),\n stride=stride,\n valid_trigy=True,\n )\n self.pw_conv = Conv2dValid(\n in_channels=encoder_dim,\n out_channels=encoder_dim,\n kernel_size=1,\n stride=1,\n valid_trigx=False,\n valid_trigy=False,\n )\n\n self.kernel_size = kernel_size\n self.stride = stride\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.encoder_dim**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs: torch.Tensor,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n xs = xs.masked_fill(mask_pad.transpose(1, 2).eq(0), 0.0)\n xs = xs.unsqueeze(2)\n padding1 = self.kernel_size - self.stride\n xs = F.pad(xs, (0, 0, 0, 0, 0, padding1, 0, 0), mode=\"constant\", value=0.0)\n xs = self.dw_conv(xs.permute(0, 3, 1, 2))\n xs = self.pw_conv(xs).permute(0, 3, 2, 1).squeeze(1).contiguous()\n tmp_length = xs.size(1)\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n padding2 = max(0, (xs_lens.max() - tmp_length).data.item())\n batch_size, hidden = xs.size(0), xs.size(-1)\n dummy_pad = torch.zeros(batch_size, padding2, hidden, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n mask = mask[:, ::2, ::2]\n mask_pad = mask_pad[:, :, ::2]\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "TimeReductionLayerStream", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayerStream(nn.Module):\n \"\"\"\n Squeezeformer Time Reduction procedure.\n Downsamples the audio by `stride` in the time dimension.\n Args:\n channel (int): input dimension of\n MultiheadAttentionMechanism and PositionwiseFeedForward\n out_dim (int): Output dimension of the module.\n kernel_size (int): Conv kernel size for\n depthwise convolution in convolution module\n stride (int): Downsampling factor in time dimension.\n 
\"\"\"\n\n def __init__(\n self, channel: int, out_dim: int, kernel_size: int = 1, stride: int = 2\n ):\n super(TimeReductionLayerStream, self).__init__()\n\n self.channel = channel\n self.out_dim = out_dim\n self.kernel_size = kernel_size\n self.stride = stride\n\n self.dw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=channel,\n kernel_size=kernel_size,\n stride=stride,\n padding=0,\n groups=channel,\n )\n\n self.pw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=out_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=1,\n )\n\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.channel**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ):\n xs = xs.transpose(1, 2) # [B, C, T]\n xs = xs.masked_fill(mask_pad.eq(0), 0.0)\n\n xs = self.dw_conv(xs)\n xs = self.pw_conv(xs)\n\n xs = xs.transpose(1, 2) # [B, T, C]\n\n B, T, D = xs.size()\n mask = mask[:, :: self.stride, :: self.stride]\n mask_pad = mask_pad[:, :, :: self.stride]\n L = mask_pad.size(-1)\n # For JIT exporting, we remove F.pad operator.\n if L - T < 0:\n xs = xs[:, : L - T, :].contiguous()\n else:\n dummy_pad = torch.zeros(B, L - T, D, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "SqueezeformerEncoderLayer", "path": "modules/wenet_extractor/squeezeformer/encoder_layer.py", "snippet": "class SqueezeformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward1 (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n conv_module (torch.nn.Module): Convolution module instance.\n `ConvlutionModule` instance can be used as the argument.\n feed_forward2 (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward1: Optional[nn.Module] = None,\n conv_module: Optional[nn.Module] = None,\n feed_forward2: Optional[nn.Module] = None,\n normalize_before: bool = False,\n dropout_rate: float = 0.1,\n concat_after: bool = False,\n ):\n super(SqueezeformerEncoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.layer_norm1 = nn.LayerNorm(size)\n self.ffn1 = feed_forward1\n self.layer_norm2 = nn.LayerNorm(size)\n self.conv_module = conv_module\n self.layer_norm3 = nn.LayerNorm(size)\n self.ffn2 = feed_forward2\n self.layer_norm4 = nn.LayerNorm(size)\n self.normalize_before = normalize_before\n self.dropout = nn.Dropout(dropout_rate)\n self.concat_after = concat_after\n if concat_after:\n self.concat_linear = nn.Linear(size + size, size)\n else:\n 
self.concat_linear = nn.Identity()\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n # self attention module\n residual = x\n if self.normalize_before:\n x = self.layer_norm1(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, att_cache)\n if self.concat_after:\n x_concat = torch.cat((x, x_att), dim=-1)\n x = residual + self.concat_linear(x_concat)\n else:\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = self.layer_norm1(x)\n\n # ffn module\n residual = x\n if self.normalize_before:\n x = self.layer_norm2(x)\n x = self.ffn1(x)\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm2(x)\n\n # conv module\n new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n residual = x\n if self.normalize_before:\n x = self.layer_norm3(x)\n x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm3(x)\n\n # ffn module\n residual = x\n if self.normalize_before:\n x = self.layer_norm4(x)\n x = self.ffn2(x)\n # we do not use dropout here since it is inside feed forward function\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm4(x)\n\n return x, mask, new_att_cache, new_cnn_cache" }, { "identifier": "RelPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class RelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module.\n See : Appendix B in https://arxiv.org/abs/1901.02860\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n \"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model, dropout_rate, max_len, reverse=True)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute positional encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n \"\"\"\n self.pe = self.pe.to(x.device)\n x = x * self.xscale\n pos_emb = self.position_encoding(offset, x.size(1), False)\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "MultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head: int, n_feat: int, dropout_rate: float):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super().__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(\n self, query: torch.Tensor, key: torch.Tensor, 
value: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor, size\n (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor, size\n (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor, size\n (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. 
jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n 1.When applying cross attention between decoder and encoder,\n the batch padding mask for input is in (#batch, 1, T) shape.\n 2.When applying self attention of encoder,\n the mask is in (#batch, T, T) shape.\n 3.When applying self attention of decoder,\n the mask is in (#batch, L, L) shape.\n 4.If the different position in decoder see different block\n of the encoder, such as Mocha, the passed in mask could be\n in (#batch, L, T) shape. But there is no such case in current\n Wenet.\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). 
Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "modules/wenet_extractor/squeezeformer/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n \"\"\"\n\n def __init__(\n self,\n n_head,\n n_feat,\n dropout_rate,\n do_rel_shift=False,\n adaptive_scale=False,\n init_weights=False,\n ):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.do_rel_shift = do_rel_shift\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n self.adaptive_scale = adaptive_scale\n self.ada_scale = nn.Parameter(\n torch.ones([1, 1, n_feat]), requires_grad=adaptive_scale\n )\n self.ada_bias = nn.Parameter(\n torch.zeros([1, 1, n_feat]), requires_grad=adaptive_scale\n )\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n input_max = (self.h * self.d_k) ** -0.5\n torch.nn.init.uniform_(self.linear_q.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_q.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_k.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_k.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_v.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_v.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_pos.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_out.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_out.bias, -input_max, input_max)\n\n def rel_shift(self, x, zero_triu: bool = False):\n \"\"\"Compute relative positinal encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, size).\n zero_triu (bool): If true, return the lower triangular part of\n the matrix.\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n\n zero_pad = torch.zeros(\n (x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype\n )\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(x.size()[0], x.size()[1], 
x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n # (batch, head, time1, time2)\n attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, time2, size).\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n \"\"\"\n if self.adaptive_scale:\n query = self.ada_scale * query + self.ada_bias\n key = self.ada_scale * key + self.ada_bias\n value = self.ada_scale * value + self.ada_bias\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time2)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n # Remove rel_shift since it is useless in speech recognition,\n # and it requires special attention for streaming.\n if self.do_rel_shift:\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "PositionwiseFeedForward", "path": "modules/wenet_extractor/squeezeformer/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n FeedForward are appied on each position of the sequence.\n The output dim is same with the input dim.\n\n Args:\n idim (int): Input dimenstion.\n 
hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n activation (torch.nn.Module): Activation function\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n hidden_units: int,\n dropout_rate: float,\n activation: torch.nn.Module = torch.nn.ReLU(),\n adaptive_scale: bool = False,\n init_weights: bool = False,\n ):\n \"\"\"Construct a PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.idim = idim\n self.hidden_units = hidden_units\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.activation = activation\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n self.ada_scale = None\n self.ada_bias = None\n self.adaptive_scale = adaptive_scale\n self.ada_scale = torch.nn.Parameter(\n torch.ones([1, 1, idim]), requires_grad=adaptive_scale\n )\n self.ada_bias = torch.nn.Parameter(\n torch.zeros([1, 1, idim]), requires_grad=adaptive_scale\n )\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n ffn1_max = self.idim**-0.5\n ffn2_max = self.hidden_units**-0.5\n torch.nn.init.uniform_(self.w_1.weight.data, -ffn1_max, ffn1_max)\n torch.nn.init.uniform_(self.w_1.bias.data, -ffn1_max, ffn1_max)\n torch.nn.init.uniform_(self.w_2.weight.data, -ffn2_max, ffn2_max)\n torch.nn.init.uniform_(self.w_2.bias.data, -ffn2_max, ffn2_max)\n\n def forward(self, xs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n\n Args:\n xs: input tensor (B, L, D)\n Returns:\n output tensor, (B, L, D)\n \"\"\"\n if self.adaptive_scale:\n xs = self.ada_scale * xs + self.ada_bias\n return self.w_2(self.dropout(self.activation(self.w_1(xs))))" }, { "identifier": "ConvolutionModule", "path": "modules/wenet_extractor/squeezeformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\"\"\"\n\n def __init__(\n self,\n channels: int,\n kernel_size: int = 15,\n activation: nn.Module = nn.ReLU(),\n norm: str = \"batch_norm\",\n causal: bool = False,\n bias: bool = True,\n adaptive_scale: bool = False,\n init_weights: bool = False,\n ):\n \"\"\"Construct an ConvolutionModule object.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernel size of conv layers.\n causal (int): Whether use causal convolution or not\n \"\"\"\n super().__init__()\n self.bias = bias\n self.channels = channels\n self.kernel_size = kernel_size\n self.adaptive_scale = adaptive_scale\n self.ada_scale = torch.nn.Parameter(\n torch.ones([1, 1, channels]), requires_grad=adaptive_scale\n )\n self.ada_bias = torch.nn.Parameter(\n torch.zeros([1, 1, channels]), requires_grad=adaptive_scale\n )\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0: it's a causal convolution, the input will be\n # padded with self.lorder frames on the left in forward.\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias=bias,\n )\n\n assert norm in [\"batch_norm\", \"layer_norm\"]\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = 
nn.BatchNorm1d(channels)\n else:\n self.use_layer_norm = True\n self.norm = nn.LayerNorm(channels)\n\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n pw_max = self.channels**-0.5\n dw_max = self.kernel_size**-0.5\n torch.nn.init.uniform_(self.pointwise_conv1.weight.data, -pw_max, pw_max)\n if self.bias:\n torch.nn.init.uniform_(self.pointwise_conv1.bias.data, -pw_max, pw_max)\n torch.nn.init.uniform_(self.depthwise_conv.weight.data, -dw_max, dw_max)\n if self.bias:\n torch.nn.init.uniform_(self.depthwise_conv.bias.data, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pointwise_conv2.weight.data, -pw_max, pw_max)\n if self.bias:\n torch.nn.init.uniform_(self.pointwise_conv2.bias.data, -pw_max, pw_max)\n\n def forward(\n self,\n x: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n cache: torch.Tensor = torch.zeros((0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),\n (0, 0, 0) means fake mask.\n cache (torch.Tensor): left context cache, it is only\n used in causal convolution (#batch, channels, cache_t),\n (0, 0, 0) meas fake cache.\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n if self.adaptive_scale:\n x = self.ada_scale * x + self.ada_bias\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2) # (#batch, channels, time)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n if self.lorder > 0:\n if cache.size(2) == 0: # cache_t == 0\n x = nn.functional.pad(x, (self.lorder, 0), \"constant\", 0.0)\n else:\n assert cache.size(0) == x.size(0) # equal batch\n assert cache.size(1) == x.size(1) # equal channel\n x = torch.cat((cache, x), dim=2)\n assert x.size(2) > self.lorder\n new_cache = x[:, :, -self.lorder :]\n else:\n # It's better we just return None if no cache is required,\n # However, for JIT export, here we just fake one tensor instead of\n # None.\n new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.activation(self.norm(x))\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.pointwise_conv2(x)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n return x.transpose(1, 2), new_cache" }, { "identifier": "make_pad_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n 
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask" }, { "identifier": "add_optional_chunk_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def add_optional_chunk_mask(\n xs: torch.Tensor,\n masks: torch.Tensor,\n use_dynamic_chunk: bool,\n use_dynamic_left_chunk: bool,\n decoding_chunk_size: int,\n static_chunk_size: int,\n num_decoding_left_chunks: int,\n):\n \"\"\"Apply optional mask for encoder.\n\n Args:\n xs (torch.Tensor): padded input, (B, L, D), L for max length\n mask (torch.Tensor): mask for xs, (B, 1, L)\n use_dynamic_chunk (bool): whether to use dynamic chunk or not\n use_dynamic_left_chunk (bool): whether to use dynamic left chunk for\n training.\n decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n static_chunk_size (int): chunk size for static chunk training/decoding\n if it's greater than 0, if use_dynamic_chunk is true,\n this parameter will be ignored\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n\n Returns:\n torch.Tensor: chunk mask of the input xs.\n \"\"\"\n # Whether to use chunk mask or not\n if use_dynamic_chunk:\n max_len = xs.size(1)\n if decoding_chunk_size < 0:\n chunk_size = max_len\n num_left_chunks = -1\n elif decoding_chunk_size > 0:\n chunk_size = decoding_chunk_size\n num_left_chunks = num_decoding_left_chunks\n else:\n # chunk size is either [1, 25] or full context(max_len).\n # Since we use 4 times subsampling and allow up to 1s(100 frames)\n # delay, the maximum frame is 100 / 4 = 25.\n chunk_size = torch.randint(1, max_len, (1,)).item()\n num_left_chunks = -1\n if chunk_size > max_len // 2:\n chunk_size = max_len\n else:\n chunk_size = chunk_size % 25 + 1\n if use_dynamic_left_chunk:\n max_left_chunks = (max_len - 1) // chunk_size\n num_left_chunks = torch.randint(0, max_left_chunks, (1,)).item()\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n elif static_chunk_size > 0:\n num_left_chunks = num_decoding_left_chunks\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), static_chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n else:\n chunk_masks = masks\n return chunk_masks" }, { "identifier": "get_activation", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from modules.wenet_extractor.transformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": getattr(torch.nn, \"SiLU\", Swish),\n \"gelu\": torch.nn.GELU,\n }\n\n return activation_funcs[act]()" } ]
import torch
import torch.nn as nn
from typing import Tuple, Union, Optional, List
from modules.wenet_extractor.squeezeformer.subsampling import (
    DepthwiseConv2dSubsampling4,
    TimeReductionLayer1D,
    TimeReductionLayer2D,
    TimeReductionLayerStream,
)
from modules.wenet_extractor.squeezeformer.encoder_layer import (
    SqueezeformerEncoderLayer,
)
from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding
from modules.wenet_extractor.transformer.attention import MultiHeadedAttention
from modules.wenet_extractor.squeezeformer.attention import (
    RelPositionMultiHeadedAttention,
)
from modules.wenet_extractor.squeezeformer.positionwise_feed_forward import (
    PositionwiseFeedForward,
)
from modules.wenet_extractor.squeezeformer.convolution import ConvolutionModule
from modules.wenet_extractor.utils.mask import make_pad_mask, add_optional_chunk_mask
from modules.wenet_extractor.utils.common import get_activation
14,578
pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of CNN module. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. cnn_module_kernel (int): Kernel size of convolution module. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. """ super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) else: encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, encoder_dim, attention_dropout_rate, do_rel_shift, adaptive_scale, init_weights, ) # feed-forward module definition positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( encoder_dim, encoder_dim * feed_forward_expansion_factor, feed_forward_dropout_rate, activation, adaptive_scale, init_weights, ) # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = ( encoder_dim, cnn_module_kernel, activation, cnn_norm_type, causal, True, adaptive_scale, init_weights, ) self.embed = DepthwiseConv2dSubsampling4( 1, encoder_dim, RelPositionalEncoding(encoder_dim, dropout_rate=0.1), dw_stride, input_size, input_dropout_rate, init_weights, ) self.preln = nn.LayerNorm(encoder_dim) self.encoders = torch.nn.ModuleList( [ SqueezeformerEncoderLayer( encoder_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), convolution_layer(*convolution_layer_args), positionwise_layer(*positionwise_layer_args), normalize_before, dropout, concat_after, ) for _ in range(num_blocks) ] ) if time_reduction_layer_type == "conv1d": time_reduction_layer = TimeReductionLayer1D time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } elif time_reduction_layer_type == "stream": time_reduction_layer = TimeReductionLayerStream time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } else:
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # class SqueezeformerEncoder(nn.Module): def __init__( self, input_size: int = 80, encoder_dim: int = 256, output_size: int = 256, attention_heads: int = 4, num_blocks: int = 12, reduce_idx: Optional[Union[int, List[int]]] = 5, recover_idx: Optional[Union[int, List[int]]] = 11, feed_forward_expansion_factor: int = 4, dw_stride: bool = False, input_dropout_rate: float = 0.1, pos_enc_layer_type: str = "rel_pos", time_reduction_layer_type: str = "conv1d", do_rel_shift: bool = True, feed_forward_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.1, cnn_module_kernel: int = 31, cnn_norm_type: str = "batch_norm", dropout: float = 0.1, causal: bool = False, adaptive_scale: bool = True, activation_type: str = "swish", init_weights: bool = True, global_cmvn: torch.nn.Module = None, normalize_before: bool = False, use_dynamic_chunk: bool = False, concat_after: bool = False, static_chunk_size: int = 0, use_dynamic_left_chunk: bool = False, ): """Construct SqueezeformerEncoder Args: input_size to use_dynamic_chunk, see in Transformer BaseEncoder. encoder_dim (int): The hidden dimension of encoder layer. output_size (int): The output dimension of final projection layer. attention_heads (int): Num of attention head in attention module. num_blocks (int): Num of encoder layers. reduce_idx Optional[Union[int, List[int]]]: reduce layer index, from 40ms to 80ms per frame. recover_idx Optional[Union[int, List[int]]]: recover layer index, from 80ms to 40ms per frame. feed_forward_expansion_factor (int): Enlarge coefficient of FFN. dw_stride (bool): Whether do depthwise convolution on subsampling module. input_dropout_rate (float): Dropout rate of input projection layer. pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of CNN module. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. cnn_module_kernel (int): Kernel size of convolution module. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. 
""" super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) else: encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, encoder_dim, attention_dropout_rate, do_rel_shift, adaptive_scale, init_weights, ) # feed-forward module definition positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( encoder_dim, encoder_dim * feed_forward_expansion_factor, feed_forward_dropout_rate, activation, adaptive_scale, init_weights, ) # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = ( encoder_dim, cnn_module_kernel, activation, cnn_norm_type, causal, True, adaptive_scale, init_weights, ) self.embed = DepthwiseConv2dSubsampling4( 1, encoder_dim, RelPositionalEncoding(encoder_dim, dropout_rate=0.1), dw_stride, input_size, input_dropout_rate, init_weights, ) self.preln = nn.LayerNorm(encoder_dim) self.encoders = torch.nn.ModuleList( [ SqueezeformerEncoderLayer( encoder_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), convolution_layer(*convolution_layer_args), positionwise_layer(*positionwise_layer_args), normalize_before, dropout, concat_after, ) for _ in range(num_blocks) ] ) if time_reduction_layer_type == "conv1d": time_reduction_layer = TimeReductionLayer1D time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } elif time_reduction_layer_type == "stream": time_reduction_layer = TimeReductionLayerStream time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } else:
time_reduction_layer = TimeReductionLayer2D
2
2023-11-15 09:19:27+00:00
24k
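The Squeezeformer encoder record above relies on make_pad_mask to turn sequence lengths into a boolean mask over padded positions; its docstring example maps lengths [5, 3, 2] to the mask shown below. This is a minimal standalone sketch copied from the record's snippet, with only a demo call added.

import torch

def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    # Mirror of the record's make_pad_mask: True marks padded positions.
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else int(lengths.max().item())
    seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    return mask

# Docstring example from the record: lengths [5, 3, 2]
print(make_pad_mask(torch.tensor([5, 3, 2])).int())
# tensor([[0, 0, 0, 0, 0],
#         [0, 0, 0, 1, 1],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)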
BobaZooba/xllm
src/xllm/cli/quantize.py
[ { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules doesn't set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # If CUDA is available and BF16 is supported, model_dtype will be `torch.bfloat16`.\n # Otherwise, it falls back to `torch.float16` due to the forced FP16 configuration.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for ing up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) along the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "Quantizer", "path": "src/xllm/quantization/quantizer.py", "snippet": "class Quantizer:\n \"\"\"\n The `Quantizer` class is responsible for the quantization of pretrained language models using the GPTQ approach\n provided by the Optimum library. 
Quantization is the process of reducing the precision of the model's weights,\n which can lead to decreased model size and potentially faster inference times while maintaining comparable accuracy.\n\n The class provides tools to set up the quantization environment, execute the quantization, and save the resultant\n quantized model.\n\n Thanks to Phil Schmid's post: https://www.philschmid.de/gptq-llama\n\n Key methods:\n - `__init__`: Initializes the Quantizer with a configuration object and optional tokenizer, model, and dataset.\n - `build`: Sets up the tokenizer, model, and dataset if not provided during initialization and prepares the\n internal `GPTQQuantizer` for the quantization process.\n - `build_dataset`: Constructs or retrieves the dataset to calibrate the model for the quantization process.\n - `quantize`: Runs the actual quantization process by fine-tuning quantization scales with the dataset and\n quantizing model weights.\n - `save`: Saves the quantized model to disk and optionally uploads it to the Hugging Face model hub.\n\n Attributes:\n - `config` (`Config`): Configuration parameters for building the tokenizer, model, creating the dataset, and\n quantization settings.\n - `tokenizer` (`PreTrainedTokenizer`, defaults to `None`): The tokenizer to format the input for the model\n to be quantized.\n - `model` (`PreTrainedModel`, optional): The pretrained language model to be quantized.\n - `dataset` (`Union[str, List[str], None]`, defaults to `None`): Identifier or samples for the dataset used during\n quantization calibration.\n - `low_cpu_mem_usage` (`bool`, defaults to `None`): Whether to use optimized settings to lower CPU memory usage\n during quantization.\n - `quantizer` (`GPTQQuantizer`, defaults to `None`): The Optimum library's quantizer instance to perform\n the quantization.\n - `quantized_model` (`PreTrainedModel`, defaults to `None`): The resultant quantized model post-quantization\n process.\n\n This class should be used when there is a need to quantize language models for deployment or when optimizing models\n for environments with resource constraints or specific performance targets.\n \"\"\"\n\n def __init__(\n self,\n config: Config,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n model: Optional[PreTrainedModel] = None,\n dataset: Union[str, List[str], None] = None,\n low_cpu_mem_usage: Optional[bool] = None,\n ):\n \"\"\"\n Initializes the Quantizer object which is responsible for the quantization of a pretrained language model.\n\n The quantization process aims to reduce the precision of the model's weights to a specific bit-width to optimize\n its size and, potentially, inference speed, often with minimal impact on performance.\n\n Args:\n config (`Config`):\n The configuration object containing parameters for model building, tokenizer, datasets,\n and quantization.\n tokenizer (`PreTrainedTokenizer`, defaults to `None`):\n The tokenizer associated with the model that will be quantized. It is required for the quantization\n process as it formats the input for the model. If not provided, it will be built from the configuration.\n model (`PreTrainedModel`, optional):\n The pretrained model to be quantized. If not provided, it will be built from the configuration.\n dataset (`Union[str, List[str], None]`, defaults to `None`):\n The dataset to be used for calibrating the quantization process. It can be either a dataset identifier\n string or a list of text samples. 
If not provided, it will be built from the configuration.\n low_cpu_mem_usage (`bool`, defaults to `None`):\n If set to `True`, the model will attempt to use less CPU memory during quantization which can be\n beneficial when working with large models on machines with limited resources.\n\n The initializer of the Quantizer sets up the necessary components such as the tokenizer, model, and dataset,\n which will be used for quantization. If any of these components are not provided, they will be constructed based\n on the specified configuration. The `low_cpu_mem_usage` parameter is an important consideration for systems with\n restricted resources, and it's recommended to be set to `True` for quantization.\n\n Quantizer uses the `GPTQQuantizer` class from Optimum library for the quantization process. The attributes\n `quantizer` and `quantized_model` are initialized as `None` and will be created and filled during the build and\n quantize steps, respectively.\n\n Upon initialization, this class also performs internal checks like the availability of CUDA and whether all\n necessary settings for automatic GPT quantization are provided within the configuration.\n \"\"\"\n self.config = config\n\n self.tokenizer = tokenizer\n self.model = model\n self.dataset = dataset\n\n self.low_cpu_mem_usage = low_cpu_mem_usage\n\n self.quantizer: Optional[GPTQQuantizer] = None\n self.quantized_model: Optional[PreTrainedModel] = None\n\n def internal_checks(self) -> None:\n if not torch.cuda.is_available():\n dist_logger.warning(\"CUDA is not available\")\n\n self.config.check_auto_gptq()\n\n def build(self) -> None:\n \"\"\"\n Builds the necessary components for the quantization process by performing internal checks, constructing\n the tokenizer, model and building/verifying the dataset.\n\n This method prepares the `Quantizer` instance to perform the quantization process on the language model.\n It initializes internal class attributes such as tokenizer, model, and dataset if they are not already provided.\n\n The method performs the following steps:\n - Validates the availability of CUDA and performs checks based on the configuration settings for quantization.\n - Constructs the tokenizer if it wasn't provided during Quantizer initialization. The tokenizer is essential\n for formatting the input data for the model.\n - Constructs the model if it wasn't provided during Quantizer initialization. 
A warning is issued if the\n `low_cpu_mem_usage` attribute is not set since quantization can be resource-intensive.\n - Sets up the quantizer instance of `GPTQQuantizer` class, configured with bits, group size, dataset, and\n model sequence length based on the provided configuration.\n\n Post execution, the Quantizer is set up with a tokenizer and model ready for quantization, as well as a\n `GPTQQuantizer` instance initialized with the appropriate configuration and dataset.\n\n Does not return any value.\n\n Raises:\n - ValueError if the dataset class corresponding to the `config.dataset_key` is not found in the registry.\n - ValueError if the quantization dataset cannot be loaded.\n\n Note: If `low_cpu_mem_usage` is not specified or set to `False`, a warning is given to consider setting it to\n `True` for quantization, especially when dealing with large models and systems with limited resources.\n \"\"\"\n self.internal_checks()\n\n if self.tokenizer is None:\n self.tokenizer = build_tokenizer(config=self.config, use_fast=False)\n dist_logger.info(f\"Tokenizer {self.config.correct_tokenizer_name_or_path} was built\")\n\n if self.model is None:\n if self.low_cpu_mem_usage is None or not self.low_cpu_mem_usage:\n dist_logger.warning(\"low_cpu_mem_usage is None. Recommended to set to True for quantization\")\n self.model = build_model(\n config=self.config,\n quantization_config=None,\n low_cpu_mem_usage=self.low_cpu_mem_usage,\n )\n dist_logger.info(f\"Model {self.config.model_name_or_path} was built\")\n\n dataset = self.build_dataset() if self.dataset is None else self.dataset\n\n self.quantizer = GPTQQuantizer(\n bits=self.config.gptq_bits,\n group_size=self.config.gptq_group_size,\n dataset=dataset,\n model_seqlen=self.config.max_length,\n )\n dist_logger.info(\"Quantizer loaded\")\n\n return None\n\n def build_dataset(self) -> Union[str, List[str]]:\n \"\"\"\n Constructs or retrieves the dataset to be used for calibrating the quantization process of the model.\n\n The dataset is a critical component for the quantization process as it is used to fine-tune the quantization\n scales on actual data, ensuring model accuracy is preserved post-quantization.\n\n The method performs the following steps:\n - If a dataset ID is specified in the configuration for quantization, this ID is used directly.\n - If no dataset ID is provided, but `prepare_dataset` is enabled in the configuration, the dataset class\n associated\n with `config.dataset_key` is used to prepare the data.\n - If neither of the above are provided or applicable, the method attempts to build the dataset by leveraging\n the configuration settings and dataset preparation procedures.\n\n During the process, it will:\n - Fetch and combine text parts from the raw dataset into sample texts for quantization.\n - Limit the total number of samples to `config.quantization_max_samples` if this value is set.\n\n Returns:\n `Union[str, List[str]]`: A dataset identifier string if the dataset comes from a repository or a list\n of text samples for calibrating quantization.\n\n Raises:\n ValueError: If the dataset class for `config.dataset_key` is not found in the datasets registry or\n if the dataset cannot be built or loaded according to the configuration specifications.\n\n This method ensures that a suitable and correctly formatted dataset is available for quantizing the model.\n \"\"\"\n dataset_id = None\n samples: List[str] = list()\n\n if self.config.quantization_dataset_id is not None:\n dataset_id = 
self.config.quantization_dataset_id\n else:\n if self.config.prepare_dataset:\n dataset_cls = datasets_registry.get(self.config.dataset_key)\n\n if dataset_cls is None:\n raise ValueError(f\"Dataset with key {self.config.dataset_key} not found\")\n\n dataset_cls.prepare(config=self.config)\n\n raw_dataset = build_dataset(config=self.config, is_train=True)\n\n if raw_dataset is None:\n raise ValueError(\"Quantization dataset can't be loaded\")\n\n samples = list()\n\n for sample_index in tqdm(\n range(len(raw_dataset)), desc=\"Loading quantization dataset\", total=self.config.quantization_max_samples\n ):\n sample: Dict[str, Any] = raw_dataset[sample_index]\n text_parts = sample[enums.General.text_parts]\n text = \"\\n\".join(text_parts)\n if isinstance(text, str):\n samples.append(text)\n\n if 0 < self.config.quantization_max_samples == len(samples):\n break\n\n return dataset_id or samples\n\n def quantize(self) -> None:\n \"\"\"\n Executes the quantization process on the pre-loaded pretrained language model using the `GPTQQuantizer`.\n\n This is the core method where the quantization scales are fine-tuned based on the provided dataset and the model\n weights are quantized to the specified bit-width.\n\n The method performs the following steps:\n - Validates that the tokenizer, model, and quantizer have been set up by calling `build`. If any of these\n components are not initialized, a ValueError is raised.\n - Proceeds with the quantization process, during which the `GPTQQuantizer` adjusts the quantization parameters\n based on the provided dataset and quantizes the model's weights.\n - Stores the resulting quantized model in the `quantized_model` attribute.\n\n Does not return any value.\n\n Raises:\n ValueError: If the tokenizer, model, or quantizer is not initialized before calling this method.\n\n Note:\n The quantized model is stored within the `quantized_model` attribute of the `Quantizer` instance and can be\n accessed or saved after quantization.\n \"\"\"\n if self.tokenizer is None:\n raise ValueError(\"tokenizer is None. It is impossible to quantize. Please run build\")\n\n if self.model is None:\n raise ValueError(\"model is None. It is impossible to quantize. Please run build\")\n\n if self.quantizer is None:\n raise ValueError(\"quantizer is None. It is impossible to quantize. Please run build\")\n\n dist_logger.info(\"Start quantization\")\n self.quantized_model = self.quantizer.quantize_model(self.model, self.tokenizer)\n dist_logger.info(\"Quantization complete\")\n\n return None\n\n def save(self) -> None:\n \"\"\"\n Saves the quantized model to a specified directory and pushes it to the Hugging Face model hub if desired.\n\n This method deals with the post-quantization step of persisting the quantized model for future use. It ensures\n that the model is stored in an accessible location and is properly serialized.\n\n The method performs the following steps:\n - Checks if the quantized model is present. 
If not, raises a ValueError as there is nothing to save.\n - Saves the quantized model to the path specified in `config.quantized_model_path`.\n - Saves the tokenizer associated with the quantized model to the same path.\n - Adjusts the configuration file for the quantized model to ensure that any `disable_exllama` flags are set\n correctly and in sync with the quantizer's configuration.\n - Optionally, if a `quantized_hub_model_id` is specified in the config, the method pushes the quantized model\n and tokenizer to the Hugging Face model hub using this ID.\n\n Does not return any value.\n\n Raises:\n ValueError: If `quantized_model` is not set, indicating that there is no model to save.\n\n Note:\n The method logs appropriate messages to keep the user informed about the saving process and any issues that\n might occur, such as the absence of a `quantized_hub_model_id` for hub uploads. It's essential to ensure\n that the model hub ID is set if pushing the model to the cloud is intended.\n \"\"\"\n if self.quantized_model is None:\n raise ValueError(\"quantized_model is None. Nothing to save\")\n\n dist_logger.info(f\"Saving quantized model to {self.config.quantized_model_path}\")\n self.quantized_model.save_pretrained(\n save_directory=self.config.quantized_model_path,\n safe_serialization=self.config.save_safetensors,\n )\n\n fast_tokenizer = build_tokenizer(config=self.config)\n\n fast_tokenizer.save_pretrained(save_directory=self.config.quantized_model_path)\n\n path_to_config = os.path.join(self.config.quantized_model_path, \"config.json\")\n path_to_quantize_config = os.path.join(self.config.quantized_model_path, \"quantize_config.json\")\n\n if self.quantizer is not None:\n with open(\n path_to_quantize_config,\n \"w\",\n encoding=\"utf-8\",\n ) as file_object:\n self.quantizer.disable_exllama = False\n json.dump(self.quantizer.to_dict(), file_object, indent=2)\n else:\n dist_logger.error(\"quantizer is None. saved quantized model can be broken\")\n\n with open(path_to_config, \"r\", encoding=\"utf-8\") as file_object:\n model_config = json.load(file_object)\n model_config[\"quantization_config\"][\"disable_exllama\"] = False\n\n with open(path_to_config, \"w\", encoding=\"utf-8\") as file_object:\n json.dump(model_config, file_object, indent=2)\n\n if self.config.quantized_hub_model_id is not None:\n dist_logger.info(f\"Push quantized model to the hub {self.config.quantized_hub_model_id}\")\n self.quantized_model.push_to_hub(\n repo_id=self.config.quantized_hub_model_id,\n private=self.config.quantized_hub_private_repo,\n safe_serialization=self.config.save_safetensors,\n max_shard_size=self.config.max_shard_size,\n )\n if fast_tokenizer is not None:\n fast_tokenizer.push_to_hub(\n repo_id=self.config.quantized_hub_model_id,\n private=self.config.quantized_hub_private_repo,\n safe_serialization=self.config.save_safetensors,\n )\n if self.config.push_to_hub_bos_add_bos_token:\n push_to_hub_bos_add_bos_token(repo_id=self.config.quantized_hub_model_id)\n else:\n dist_logger.warning(\"quantized_hub_model_id is None. Model will stay locally\")" }, { "identifier": "quantize", "path": "src/xllm/run/quantize.py", "snippet": "def quantize(config: Config) -> Quantizer:\n \"\"\"\n Orchestrates the model quantization process using the configuration parameters provided.\n\n This function facilitates the reduction of model size and potentially improves inference speed by quantizing the\n model weights according to the given configuration. 
It sets up the required environment, performs the quantization,\n and saves the resulting smaller model.\n\n Args:\n config (`Config`):\n The configuration object that contains settings and parameters influencing the quantization process,\n such as model and tokenizer paths, quantization bits, dataset for calibration, and other related settings.\n\n Returns:\n Quantizer:\n An instance of the `Quantizer` class that has conducted the quantization process and holds the resulting\n quantized model.\n\n The function executes the following steps:\n - Initializes a new instance of `Quantizer` with the provided configuration.\n - Calls the `build` method on the `Quantizer` to construct or set up the tokenizer, model, and dataset needed for\n quantization.\n - Invokes the `quantize` method to perform the actual quantization process on the model weights.\n - Calls the `save` method to save the resulting quantized model and push it to the Hugging Face model hub if\n configured.\n\n Example usage:\n ```python\n from some_module.config import Config\n\n # Assuming we have a predefined Config object for quantization.\n config = Config(...)\n quantizer_instance = quantize(config=config)\n\n # The quantizer_instance now contains the quantized model ready to use or to be further saved or analyzed.\n ```\n\n Note:\n Quantization is a critical step for deploying models to environments with resource constraints or specific\n performance targets. This function streamlines the quantization process and ensures that the quantized model\n is accessible through the `Quantizer` instance returned.\n \"\"\"\n quantizer = Quantizer(config=config)\n quantizer.build()\n quantizer.quantize()\n quantizer.save()\n\n return quantizer" }, { "identifier": "setup_cli", "path": "src/xllm/utils/cli.py", "snippet": "def setup_cli(config: Config, logger_path: str = \"xllm.log\", rotation: str = \"5 MB\") -> None:\n \"\"\"\n Sets up the command-line interface (CLI) environment for language model training and evaluation\n by initializing the logger, loading environment variables, and setting global configuration options\n for tokenization and seeding.\n\n Args:\n config (`Config`):\n The experiment's configuration object that contains necessary parameters,\n including the path to a `.env` file, seed value for reproducibility, and settings related\n to Weights & Biases (wandb) reporting.\n logger_path (`str`, defaults to \"xllm.log\"):\n The file path where the log records will be stored.\n rotation (`str`, defaults to \"5 MB\"):\n The policy that determines when a new log file is started. 
It could be a size limit (like \"5 MB\"),\n a time period, or a condition.\n\n This function performs several key setup steps:\n\n - Initializes the file logger with the specified `logger_path` and `rotation` policy, which manages\n log file rotation based on the file size limit or other criteria.\n - Loads environment variables from the `.env` file specified by the `config.path_to_env_file` attribute.\n This step is crucial for retrieving sensitive information, which should not be hardcoded in the code,\n such as API keys.\n - Sets tokenization-related environment variables to avoid parallelism-related warnings or issues during\n tokenization processes.\n - Checks and issues warnings if API keys for Weights & Biases or HuggingFace Hub are not found\n in the environment variables, which are essential for model reporting and uploading.\n - Seeds the random number generators for libraries like Transformers to ensure reproducibility across runs.\n - Sets the logging verbosity level for the Transformers library to suppress unnecessary messages during execution.\n\n The `setup_cli` function is typically called at the start of a training or evaluation run to ensure that\n the environment is correctly configured and that all requisite external dependencies are in place and\n properly initialized for the rest of the experiment's execution.\n \"\"\"\n\n logger.add(logger_path, rotation=rotation)\n load_dotenv(dotenv_path=config.path_to_env_file)\n logger.info(\".env loaded\")\n\n os.environ[enums.EnvironmentVariables.tokenizers_parallelism] = \"false\"\n\n if config.report_to_wandb and enums.EnvironmentVariables.wandb_api_key not in os.environ:\n logger.warning(\"W&B token not found in env vars\")\n\n if enums.EnvironmentVariables.huggingface_hub_token not in os.environ:\n logger.warning(\"HuggingFaceHub token not found in env vars\")\n\n transformers.set_seed(seed=config.seed)\n transformers.logging.set_verbosity_error()\n logger.info(f'Logger path \"{logger_path}\" with rotation \"{rotation}\"')\n\n return None" } ]
from typing import Type

from transformers import HfArgumentParser

from ..core.config import Config
from ..quantization.quantizer import Quantizer
from ..run.quantize import quantize
from ..utils.cli import setup_cli
16282
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def cli_run_quantize(
    config_cls: Type[Config] = Config,
) -> Quantizer:
    """
    Provides a command-line interface (CLI) entry point for the quantization process of a pre-trained language model.

    This function allows users to execute model quantization from the command line, setting up the appropriate
    configuration through parsed arguments and invoking the `quantize` function. It also sets up the CLI
    environment, including logging configurations.

    Args:
        config_cls (Type[Config], defaults to `Config`):
            The configuration class used for parsing command-line arguments into a configuration object.
            This class should contain the parameters necessary for the quantization process such as model and
            tokenizer paths, and quantization specifications.

    Returns:
        Quantizer: An instance of the `Quantizer` class that has conducted the quantization process and stores
        the resulting quantized model.

    The function undergoes the following procedure:
    - Initializes an `HfArgumentParser` with `config_cls` for handling command-line arguments.
    - Parses the command-line arguments into an instance of the configuration object.
    - Sets up CLI interactions, which include logging outputs to a specified file
      (defaults to `./xllm_gptq_quantize.log`).
    - Executes the `quantize` function with the parsed configuration to perform model quantization.
    - Returns the `Quantizer` instance that now holds the quantized model and associated configurations.

    When this script is run as a main program (that is, `__name__ == "__main__"`), it will perform the following:
    - Parse CLI arguments into a configuration object using the provided `config_cls`.
    - Run the quantization process with logging to the file `xllm_gptq_quantize.log`.
    - Return the `Quantizer` instance with the quantized model ready for use or distribution.

    Example CLI usage:
    ```sh
    python cli_run_quantize.py --model_name_or_path my_model --gptq_bits 4
    ```

    Note:
        This function is designed to facilitate the simplification of the model quantization workflow through
        the CLI, intended for direct execution from the terminal or within scripting environments.
    """
    parser = HfArgumentParser(config_cls)
    config = parser.parse_args_into_dataclasses()[0]
    setup_cli(config=config, logger_path="./xllm_gptq_quantize.log")
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def cli_run_quantize(
    config_cls: Type[Config] = Config,
) -> Quantizer:
    """
    Provides a command-line interface (CLI) entry point for the quantization process of a pre-trained language model.

    This function allows users to execute model quantization from the command line, setting up the appropriate
    configuration through parsed arguments and invoking the `quantize` function. It also sets up the CLI
    environment, including logging configurations.

    Args:
        config_cls (Type[Config], defaults to `Config`):
            The configuration class used for parsing command-line arguments into a configuration object.
            This class should contain the parameters necessary for the quantization process such as model and
            tokenizer paths, and quantization specifications.

    Returns:
        Quantizer: An instance of the `Quantizer` class that has conducted the quantization process and stores
        the resulting quantized model.

    The function undergoes the following procedure:
    - Initializes an `HfArgumentParser` with `config_cls` for handling command-line arguments.
    - Parses the command-line arguments into an instance of the configuration object.
    - Sets up CLI interactions, which include logging outputs to a specified file
      (defaults to `./xllm_gptq_quantize.log`).
    - Executes the `quantize` function with the parsed configuration to perform model quantization.
    - Returns the `Quantizer` instance that now holds the quantized model and associated configurations.

    When this script is run as a main program (that is, `__name__ == "__main__"`), it will perform the following:
    - Parse CLI arguments into a configuration object using the provided `config_cls`.
    - Run the quantization process with logging to the file `xllm_gptq_quantize.log`.
    - Return the `Quantizer` instance with the quantized model ready for use or distribution.

    Example CLI usage:
    ```sh
    python cli_run_quantize.py --model_name_or_path my_model --gptq_bits 4
    ```

    Note:
        This function is designed to facilitate the simplification of the model quantization workflow through
        the CLI, intended for direct execution from the terminal or within scripting environments.
    """
    parser = HfArgumentParser(config_cls)
    config = parser.parse_args_into_dataclasses()[0]
    setup_cli(config=config, logger_path="./xllm_gptq_quantize.log")
quantizer = quantize(config=config)
2
2023-11-10 17:55:03+00:00
24k
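To make the second record easier to follow, here is a hedged sketch of how `cli_run_quantize` plausibly continues after the recorded `next_line`. The `return quantizer` and the `__main__` guard are inferred from the function's own docstring, and the absolute import paths are assumptions based on the record's relative imports; this is an illustration, not the verbatim tail of `src/xllm/cli/quantize.py`.

```python
from typing import Type

from transformers import HfArgumentParser

# Absolute paths assumed from the record's relative imports (..core.config, ..quantization.quantizer, ...).
from xllm.core.config import Config
from xllm.quantization.quantizer import Quantizer
from xllm.run.quantize import quantize
from xllm.utils.cli import setup_cli


def cli_run_quantize(config_cls: Type[Config] = Config) -> Quantizer:
    # Parse CLI arguments into a Config dataclass and set up logging, as in the cropped code.
    parser = HfArgumentParser(config_cls)
    config = parser.parse_args_into_dataclasses()[0]
    setup_cli(config=config, logger_path="./xllm_gptq_quantize.log")
    quantizer = quantize(config=config)  # the record's gold next_line
    return quantizer  # the docstring states the Quantizer instance is returned


if __name__ == "__main__":
    # The docstring describes this behavior when the module runs as a script.
    cli_run_quantize()
```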